2024-11-23 03:20:35,135 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-23 03:20:35,150 main DEBUG Took 0.013105 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 03:20:35,150 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 03:20:35,151 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 03:20:35,151 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 03:20:35,153 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,161 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 03:20:35,175 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,178 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,178 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,179 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,179 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,181 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,181 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,182 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,182 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,183 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,184 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,184 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,185 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 03:20:35,185 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,186 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,187 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,187 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,188 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,188 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,189 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,189 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,190 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,190 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 03:20:35,191 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,191 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 03:20:35,193 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 03:20:35,195 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 03:20:35,197 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 03:20:35,198 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 03:20:35,199 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 03:20:35,200 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 03:20:35,209 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 03:20:35,212 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 03:20:35,213 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 03:20:35,214 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 03:20:35,214 main DEBUG createAppenders(={Console}) 2024-11-23 03:20:35,215 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-23 03:20:35,215 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-23 03:20:35,216 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-23 03:20:35,216 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 03:20:35,217 main DEBUG OutputStream closed 2024-11-23 03:20:35,217 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 03:20:35,217 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 03:20:35,218 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-23 03:20:35,290 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 03:20:35,293 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 03:20:35,294 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 03:20:35,295 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 03:20:35,296 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 03:20:35,296 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 03:20:35,297 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 03:20:35,297 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 03:20:35,297 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 03:20:35,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 03:20:35,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 03:20:35,299 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 03:20:35,299 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 03:20:35,299 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 03:20:35,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 03:20:35,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 03:20:35,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 03:20:35,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 03:20:35,303 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 03:20:35,304 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-23 03:20:35,304 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 03:20:35,305 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-23T03:20:35,529 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655 2024-11-23 03:20:35,532 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 03:20:35,533 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-23T03:20:35,541 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-23T03:20:35,559 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T03:20:35,562 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e, deleteOnExit=true 2024-11-23T03:20:35,563 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-23T03:20:35,563 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/test.cache.data in system properties and HBase conf 2024-11-23T03:20:35,564 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T03:20:35,564 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.log.dir in system properties and HBase conf 2024-11-23T03:20:35,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T03:20:35,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T03:20:35,566 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-23T03:20:35,657 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-23T03:20:35,753 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T03:20:35,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T03:20:35,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T03:20:35,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T03:20:35,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T03:20:35,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T03:20:35,759 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T03:20:35,759 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T03:20:35,760 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T03:20:35,760 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T03:20:35,760 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/nfs.dump.dir in system properties and HBase conf 2024-11-23T03:20:35,761 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/java.io.tmpdir in system properties and HBase conf 2024-11-23T03:20:35,761 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T03:20:35,761 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T03:20:35,762 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T03:20:36,608 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T03:20:36,695 INFO [Time-limited test {}] log.Log(170): Logging initialized @2305ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T03:20:36,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T03:20:36,846 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T03:20:36,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T03:20:36,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T03:20:36,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T03:20:36,885 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T03:20:36,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.log.dir/,AVAILABLE} 2024-11-23T03:20:36,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T03:20:37,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/java.io.tmpdir/jetty-localhost-44307-hadoop-hdfs-3_4_1-tests_jar-_-any-15692534145484608031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T03:20:37,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:44307} 2024-11-23T03:20:37,099 INFO [Time-limited test {}] server.Server(415): Started @2709ms 2024-11-23T03:20:37,502 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T03:20:37,509 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T03:20:37,510 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T03:20:37,510 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T03:20:37,510 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T03:20:37,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.log.dir/,AVAILABLE} 2024-11-23T03:20:37,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T03:20:37,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/java.io.tmpdir/jetty-localhost-41413-hadoop-hdfs-3_4_1-tests_jar-_-any-12315861518681335514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T03:20:37,633 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:41413} 2024-11-23T03:20:37,634 INFO [Time-limited test {}] server.Server(415): Started @3244ms 2024-11-23T03:20:37,691 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T03:20:38,135 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/dfs/data/data1/current/BP-1790657337-172.17.0.2-1732332036355/current, will proceed with Du for space computation calculation, 2024-11-23T03:20:38,135 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/dfs/data/data2/current/BP-1790657337-172.17.0.2-1732332036355/current, will proceed with Du for space computation calculation, 2024-11-23T03:20:38,178 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T03:20:38,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x482e6acdf31f00db with lease ID 0x534ac1c302a37cf6: Processing first storage report for DS-914184f4-e749-4d03-8fdd-ef53d50ec5dc from datanode DatanodeRegistration(127.0.0.1:38243, datanodeUuid=09c68cfc-715e-4d36-8e8f-79b3dfd79368, infoPort=35009, infoSecurePort=0, ipcPort=38185, storageInfo=lv=-57;cid=testClusterID;nsid=1526148322;c=1732332036355) 2024-11-23T03:20:38,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x482e6acdf31f00db with lease ID 0x534ac1c302a37cf6: from storage DS-914184f4-e749-4d03-8fdd-ef53d50ec5dc node DatanodeRegistration(127.0.0.1:38243, datanodeUuid=09c68cfc-715e-4d36-8e8f-79b3dfd79368, infoPort=35009, infoSecurePort=0, ipcPort=38185, storageInfo=lv=-57;cid=testClusterID;nsid=1526148322;c=1732332036355), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-23T03:20:38,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x482e6acdf31f00db with lease ID 0x534ac1c302a37cf6: Processing first storage report for DS-8fe3f57d-d4fb-44dc-b028-bdb843e0e062 from datanode DatanodeRegistration(127.0.0.1:38243, datanodeUuid=09c68cfc-715e-4d36-8e8f-79b3dfd79368, infoPort=35009, infoSecurePort=0, ipcPort=38185, storageInfo=lv=-57;cid=testClusterID;nsid=1526148322;c=1732332036355) 2024-11-23T03:20:38,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x482e6acdf31f00db with lease ID 0x534ac1c302a37cf6: from storage DS-8fe3f57d-d4fb-44dc-b028-bdb843e0e062 node DatanodeRegistration(127.0.0.1:38243, datanodeUuid=09c68cfc-715e-4d36-8e8f-79b3dfd79368, infoPort=35009, infoSecurePort=0, ipcPort=38185, storageInfo=lv=-57;cid=testClusterID;nsid=1526148322;c=1732332036355), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T03:20:38,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655 
2024-11-23T03:20:38,369 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/zookeeper_0, clientPort=61411, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T03:20:38,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=61411 2024-11-23T03:20:38,394 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:38,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741825_1001 (size=7) 2024-11-23T03:20:39,076 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 with version=8 2024-11-23T03:20:39,077 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/hbase-staging 2024-11-23T03:20:39,205 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T03:20:39,472 INFO [Time-limited test {}] client.ConnectionUtils(129): master/0d51875c74df:0 server-side Connection retries=45 2024-11-23T03:20:39,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,492 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T03:20:39,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T03:20:39,624 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T03:20:39,683 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T03:20:39,692 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T03:20:39,696 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T03:20:39,724 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22752 (auto-detected) 2024-11-23T03:20:39,725 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T03:20:39,745 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39215 2024-11-23T03:20:39,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:39,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:39,767 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:39215 connecting to ZooKeeper ensemble=127.0.0.1:61411 2024-11-23T03:20:39,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392150x0, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T03:20:39,799 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39215-0x1002264e1b00000 connected 2024-11-23T03:20:39,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T03:20:39,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T03:20:39,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T03:20:39,843 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39215 2024-11-23T03:20:39,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39215 2024-11-23T03:20:39,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39215 2024-11-23T03:20:39,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39215 2024-11-23T03:20:39,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39215 
2024-11-23T03:20:39,852 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417, hbase.cluster.distributed=false 2024-11-23T03:20:39,938 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0d51875c74df:0 server-side Connection retries=45 2024-11-23T03:20:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,938 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T03:20:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T03:20:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T03:20:39,941 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T03:20:39,943 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T03:20:39,944 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34141 2024-11-23T03:20:39,945 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T03:20:39,951 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T03:20:39,952 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:39,955 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:39,958 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:34141 connecting to ZooKeeper ensemble=127.0.0.1:61411 2024-11-23T03:20:39,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341410x0, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T03:20:39,961 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:341410x0, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T03:20:39,962 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34141-0x1002264e1b00001 connected 2024-11-23T03:20:39,963 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T03:20:39,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T03:20:39,965 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34141 2024-11-23T03:20:39,965 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34141 2024-11-23T03:20:39,965 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34141 2024-11-23T03:20:39,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34141 2024-11-23T03:20:39,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34141 2024-11-23T03:20:39,969 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/0d51875c74df,39215,1732332039198 2024-11-23T03:20:39,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T03:20:39,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T03:20:39,977 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d51875c74df,39215,1732332039198 2024-11-23T03:20:39,984 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d51875c74df:39215 2024-11-23T03:20:39,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T03:20:39,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T03:20:39,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:39,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,000 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T03:20:40,001 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T03:20:40,001 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d51875c74df,39215,1732332039198 from backup master directory 2024-11-23T03:20:40,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d51875c74df,39215,1732332039198 2024-11-23T03:20:40,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T03:20:40,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T03:20:40,005 WARN [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T03:20:40,005 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d51875c74df,39215,1732332039198 2024-11-23T03:20:40,007 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T03:20:40,008 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T03:20:40,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741826_1002 (size=42) 2024-11-23T03:20:40,076 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/hbase.id with ID: 058ce063-4403-4da4-8bb9-44f36d2ad45e 2024-11-23T03:20:40,119 INFO [master/0d51875c74df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T03:20:40,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741827_1003 (size=196) 2024-11-23T03:20:40,180 INFO [master/0d51875c74df:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:20:40,182 INFO [master/0d51875c74df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T03:20:40,201 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:40,207 INFO [master/0d51875c74df:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T03:20:40,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741828_1004 (size=1189) 2024-11-23T03:20:40,256 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store 2024-11-23T03:20:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741829_1005 (size=34) 2024-11-23T03:20:40,274 INFO [master/0d51875c74df:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-23T03:20:40,275 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:40,276 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T03:20:40,276 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T03:20:40,276 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T03:20:40,276 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T03:20:40,277 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T03:20:40,277 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T03:20:40,277 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T03:20:40,279 WARN [master/0d51875c74df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/.initializing 2024-11-23T03:20:40,279 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/WALs/0d51875c74df,39215,1732332039198 2024-11-23T03:20:40,287 INFO [master/0d51875c74df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T03:20:40,298 INFO [master/0d51875c74df:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d51875c74df%2C39215%2C1732332039198, suffix=, logDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/WALs/0d51875c74df,39215,1732332039198, archiveDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/oldWALs, maxLogs=10 2024-11-23T03:20:40,323 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/WALs/0d51875c74df,39215,1732332039198/0d51875c74df%2C39215%2C1732332039198.1732332040304, exclude list is [], retry=0 2024-11-23T03:20:40,341 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38243,DS-914184f4-e749-4d03-8fdd-ef53d50ec5dc,DISK] 2024-11-23T03:20:40,344 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-23T03:20:40,381 INFO [master/0d51875c74df:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/WALs/0d51875c74df,39215,1732332039198/0d51875c74df%2C39215%2C1732332039198.1732332040304 2024-11-23T03:20:40,382 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-23T03:20:40,382 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:20:40,383 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:40,387 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,388 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T03:20:40,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:40,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:40,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T03:20:40,463 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:40,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:40,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T03:20:40,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:40,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:40,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T03:20:40,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:40,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:40,475 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,477 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,485 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T03:20:40,488 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T03:20:40,493 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:20:40,494 INFO [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58822051, jitterRate=-0.12348313629627228}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T03:20:40,499 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T03:20:40,500 INFO [master/0d51875c74df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T03:20:40,529 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a115859, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:40,564 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-23T03:20:40,576 INFO [master/0d51875c74df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T03:20:40,576 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T03:20:40,578 INFO [master/0d51875c74df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T03:20:40,580 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T03:20:40,585 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-23T03:20:40,585 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T03:20:40,617 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T03:20:40,631 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T03:20:40,633 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-23T03:20:40,635 INFO [master/0d51875c74df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T03:20:40,636 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T03:20:40,638 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-23T03:20:40,640 INFO [master/0d51875c74df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T03:20:40,643 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T03:20:40,644 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-23T03:20:40,645 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T03:20:40,647 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T03:20:40,656 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T03:20:40,657 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T03:20:40,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T03:20:40,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T03:20:40,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,662 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=0d51875c74df,39215,1732332039198, sessionid=0x1002264e1b00000, setting cluster-up flag (Was=false) 2024-11-23T03:20:40,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,679 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T03:20:40,680 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d51875c74df,39215,1732332039198 2024-11-23T03:20:40,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:40,690 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T03:20:40,691 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d51875c74df,39215,1732332039198 2024-11-23T03:20:40,773 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-23T03:20:40,779 INFO [master/0d51875c74df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-23T03:20:40,782 INFO [master/0d51875c74df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T03:20:40,782 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d51875c74df:34141 2024-11-23T03:20:40,784 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1008): ClusterId : 058ce063-4403-4da4-8bb9-44f36d2ad45e 2024-11-23T03:20:40,786 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T03:20:40,788 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d51875c74df,39215,1732332039198 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T03:20:40,791 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T03:20:40,791 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T03:20:40,791 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d51875c74df:0, corePoolSize=5, maxPoolSize=5 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d51875c74df:0, corePoolSize=5, maxPoolSize=5 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d51875c74df:0, corePoolSize=5, maxPoolSize=5 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d51875c74df:0, corePoolSize=5, maxPoolSize=5 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d51875c74df:0, corePoolSize=10, maxPoolSize=10 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,792 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d51875c74df:0, corePoolSize=2, maxPoolSize=2 2024-11-23T03:20:40,793 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,794 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T03:20:40,794 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732332070794 2024-11-23T03:20:40,794 DEBUG [RS:0;0d51875c74df:34141 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c1b8fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:40,795 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T03:20:40,796 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T03:20:40,797 DEBUG [RS:0;0d51875c74df:34141 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d59da1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d51875c74df/172.17.0.2:0 2024-11-23T03:20:40,798 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T03:20:40,798 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-23T03:20:40,800 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T03:20:40,800 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T03:20:40,800 INFO [RS:0;0d51875c74df:34141 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-23T03:20:40,800 INFO [RS:0;0d51875c74df:34141 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-23T03:20:40,800 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-23T03:20:40,800 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T03:20:40,801 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T03:20:40,801 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T03:20:40,802 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T03:20:40,803 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:40,803 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(3073): reportForDuty to master=0d51875c74df,39215,1732332039198 with isa=0d51875c74df/172.17.0.2:34141, startcode=1732332039937 2024-11-23T03:20:40,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T03:20:40,803 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T03:20:40,804 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T03:20:40,806 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T03:20:40,806 INFO [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T03:20:40,808 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.large.0-1732332040808,5,FailOnTimeoutGroup] 2024-11-23T03:20:40,808 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.small.0-1732332040808,5,FailOnTimeoutGroup] 2024-11-23T03:20:40,808 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,809 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-23T03:20:40,810 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,810 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741831_1007 (size=1039) 2024-11-23T03:20:40,820 DEBUG [RS:0;0d51875c74df:34141 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T03:20:40,856 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36283, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T03:20:40,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39215 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,864 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39215 {}] master.ServerManager(486): Registering regionserver=0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,877 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:20:40,877 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34981 2024-11-23T03:20:40,877 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-23T03:20:40,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T03:20:40,883 DEBUG [RS:0;0d51875c74df:34141 {}] zookeeper.ZKUtil(111): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,883 WARN [RS:0;0d51875c74df:34141 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T03:20:40,883 INFO [RS:0;0d51875c74df:34141 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T03:20:40,884 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,885 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d51875c74df,34141,1732332039937] 2024-11-23T03:20:40,902 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-23T03:20:40,918 INFO [RS:0;0d51875c74df:34141 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T03:20:40,931 INFO [RS:0;0d51875c74df:34141 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T03:20:40,934 INFO [RS:0;0d51875c74df:34141 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T03:20:40,934 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,935 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-23T03:20:40,942 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-23T03:20:40,942 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,942 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,942 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,942 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d51875c74df:0, corePoolSize=2, maxPoolSize=2 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,943 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d51875c74df:0, corePoolSize=1, maxPoolSize=1 2024-11-23T03:20:40,944 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d51875c74df:0, corePoolSize=3, maxPoolSize=3 2024-11-23T03:20:40,944 DEBUG [RS:0;0d51875c74df:34141 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0, corePoolSize=3, maxPoolSize=3 2024-11-23T03:20:40,944 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,945 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,945 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,945 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,945 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,34141,1732332039937-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-23T03:20:40,965 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T03:20:40,967 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,34141,1732332039937-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:40,988 INFO [RS:0;0d51875c74df:34141 {}] regionserver.Replication(204): 0d51875c74df,34141,1732332039937 started 2024-11-23T03:20:40,988 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1767): Serving as 0d51875c74df,34141,1732332039937, RpcServer on 0d51875c74df/172.17.0.2:34141, sessionid=0x1002264e1b00001 2024-11-23T03:20:40,989 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T03:20:40,989 DEBUG [RS:0;0d51875c74df:34141 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,989 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d51875c74df,34141,1732332039937' 2024-11-23T03:20:40,989 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T03:20:40,990 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T03:20:40,990 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T03:20:40,991 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T03:20:40,991 DEBUG [RS:0;0d51875c74df:34141 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d51875c74df,34141,1732332039937 2024-11-23T03:20:40,991 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d51875c74df,34141,1732332039937' 2024-11-23T03:20:40,991 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T03:20:40,991 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T03:20:40,992 DEBUG [RS:0;0d51875c74df:34141 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T03:20:40,992 INFO [RS:0;0d51875c74df:34141 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T03:20:40,992 INFO [RS:0;0d51875c74df:34141 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T03:20:41,098 INFO [RS:0;0d51875c74df:34141 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T03:20:41,102 INFO [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d51875c74df%2C34141%2C1732332039937, suffix=, logDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937, archiveDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/oldWALs, maxLogs=32 2024-11-23T03:20:41,119 DEBUG [RS:0;0d51875c74df:34141 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937/0d51875c74df%2C34141%2C1732332039937.1732332041104, exclude list is [], retry=0 2024-11-23T03:20:41,124 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38243,DS-914184f4-e749-4d03-8fdd-ef53d50ec5dc,DISK] 2024-11-23T03:20:41,128 INFO [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937/0d51875c74df%2C34141%2C1732332039937.1732332041104 2024-11-23T03:20:41,128 DEBUG [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-23T03:20:41,215 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-23T03:20:41,215 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:20:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741833_1009 (size=32) 2024-11-23T03:20:41,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:41,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T03:20:41,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T03:20:41,231 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T03:20:41,235 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T03:20:41,235 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T03:20:41,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T03:20:41,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740 2024-11-23T03:20:41,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740 2024-11-23T03:20:41,244 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-23T03:20:41,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T03:20:41,252 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:20:41,253 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75057859, jitterRate=0.11844925582408905}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:20:41,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T03:20:41,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T03:20:41,256 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T03:20:41,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T03:20:41,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T03:20:41,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T03:20:41,258 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T03:20:41,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T03:20:41,261 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T03:20:41,262 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-23T03:20:41,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T03:20:41,276 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T03:20:41,278 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T03:20:41,430 DEBUG [0d51875c74df:39215 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T03:20:41,435 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:41,439 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d51875c74df,34141,1732332039937, state=OPENING 2024-11-23T03:20:41,444 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T03:20:41,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:41,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:41,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T03:20:41,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T03:20:41,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:20:41,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:41,625 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T03:20:41,628 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T03:20:41,640 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-23T03:20:41,640 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T03:20:41,641 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T03:20:41,644 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d51875c74df%2C34141%2C1732332039937.meta, suffix=.meta, logDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937, archiveDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/oldWALs, maxLogs=32 2024-11-23T03:20:41,661 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937/0d51875c74df%2C34141%2C1732332039937.meta.1732332041646.meta, exclude list is [], retry=0 2024-11-23T03:20:41,665 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38243,DS-914184f4-e749-4d03-8fdd-ef53d50ec5dc,DISK] 2024-11-23T03:20:41,668 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/WALs/0d51875c74df,34141,1732332039937/0d51875c74df%2C34141%2C1732332039937.meta.1732332041646.meta 2024-11-23T03:20:41,669 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-23T03:20:41,669 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:20:41,670 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T03:20:41,729 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T03:20:41,734 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T03:20:41,738 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T03:20:41,738 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:41,739 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-23T03:20:41,739 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-23T03:20:41,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T03:20:41,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T03:20:41,744 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T03:20:41,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T03:20:41,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T03:20:41,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T03:20:41,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T03:20:41,751 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740 2024-11-23T03:20:41,753 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740 2024-11-23T03:20:41,756 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:20:41,759 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T03:20:41,761 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61481249, jitterRate=-0.08385799825191498}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:20:41,762 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T03:20:41,769 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732332041617 2024-11-23T03:20:41,780 DEBUG [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T03:20:41,781 INFO [RS_OPEN_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-23T03:20:41,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:41,784 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d51875c74df,34141,1732332039937, state=OPEN 2024-11-23T03:20:41,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T03:20:41,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T03:20:41,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T03:20:41,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T03:20:41,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T03:20:41,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=0d51875c74df,34141,1732332039937 in 340 msec 2024-11-23T03:20:41,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T03:20:41,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 525 msec 2024-11-23T03:20:41,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0760 sec 2024-11-23T03:20:41,803 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732332041803, completionTime=-1 2024-11-23T03:20:41,803 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T03:20:41,804 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-23T03:20:41,842 DEBUG [hconnection-0x3f04ca9-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:41,844 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:41,854 INFO [master/0d51875c74df:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-23T03:20:41,854 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732332101854 2024-11-23T03:20:41,854 INFO [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732332161854 2024-11-23T03:20:41,854 INFO [master/0d51875c74df:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 50 msec 2024-11-23T03:20:41,874 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:41,875 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:41,875 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:41,876 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d51875c74df:39215, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:41,877 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T03:20:41,881 DEBUG [master/0d51875c74df:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-23T03:20:41,884 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-23T03:20:41,885 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T03:20:41,891 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-23T03:20:41,894 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:20:41,895 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:41,897 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:20:41,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741835_1011 (size=358) 2024-11-23T03:20:42,312 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 083dc89e8b2b1c4aa6851e27c52fd159, NAME => 'hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:20:42,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741836_1012 (size=42) 2024-11-23T03:20:42,723 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:42,723 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 083dc89e8b2b1c4aa6851e27c52fd159, disabling compactions & flushes 2024-11-23T03:20:42,724 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:42,724 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:42,724 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 
after waiting 0 ms 2024-11-23T03:20:42,724 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:42,724 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:42,724 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 083dc89e8b2b1c4aa6851e27c52fd159: 2024-11-23T03:20:42,726 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:20:42,733 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732332042728"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332042728"}]},"ts":"1732332042728"} 2024-11-23T03:20:42,756 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T03:20:42,758 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:20:42,761 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332042758"}]},"ts":"1732332042758"} 2024-11-23T03:20:42,764 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-23T03:20:42,769 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=083dc89e8b2b1c4aa6851e27c52fd159, ASSIGN}] 2024-11-23T03:20:42,771 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=083dc89e8b2b1c4aa6851e27c52fd159, ASSIGN 2024-11-23T03:20:42,773 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=083dc89e8b2b1c4aa6851e27c52fd159, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:20:42,924 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=083dc89e8b2b1c4aa6851e27c52fd159, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:42,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 083dc89e8b2b1c4aa6851e27c52fd159, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:20:43,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:43,088 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:43,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 083dc89e8b2b1c4aa6851e27c52fd159, NAME => 'hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:20:43,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:43,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,092 INFO [StoreOpener-083dc89e8b2b1c4aa6851e27c52fd159-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,095 INFO [StoreOpener-083dc89e8b2b1c4aa6851e27c52fd159-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 083dc89e8b2b1c4aa6851e27c52fd159 columnFamilyName info 2024-11-23T03:20:43,095 DEBUG [StoreOpener-083dc89e8b2b1c4aa6851e27c52fd159-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:43,095 INFO [StoreOpener-083dc89e8b2b1c4aa6851e27c52fd159-1 {}] regionserver.HStore(327): Store=083dc89e8b2b1c4aa6851e27c52fd159/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:43,097 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,097 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,101 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:20:43,104 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:20:43,105 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 083dc89e8b2b1c4aa6851e27c52fd159; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69195925, jitterRate=0.031099632382392883}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T03:20:43,106 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 083dc89e8b2b1c4aa6851e27c52fd159: 2024-11-23T03:20:43,108 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159., pid=6, masterSystemTime=1732332043081 2024-11-23T03:20:43,112 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:20:43,112 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 
2024-11-23T03:20:43,113 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=083dc89e8b2b1c4aa6851e27c52fd159, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:43,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T03:20:43,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 083dc89e8b2b1c4aa6851e27c52fd159, server=0d51875c74df,34141,1732332039937 in 189 msec 2024-11-23T03:20:43,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T03:20:43,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=083dc89e8b2b1c4aa6851e27c52fd159, ASSIGN in 351 msec 2024-11-23T03:20:43,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:20:43,126 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332043125"}]},"ts":"1732332043125"} 2024-11-23T03:20:43,128 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-23T03:20:43,132 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:20:43,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2460 sec 2024-11-23T03:20:43,195 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-23T03:20:43,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-23T03:20:43,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:43,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:20:43,226 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-23T03:20:43,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T03:20:43,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-23T03:20:43,250 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-23T03:20:43,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T03:20:43,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 14 msec 2024-11-23T03:20:43,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-23T03:20:43,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-23T03:20:43,279 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.274sec 2024-11-23T03:20:43,280 INFO [master/0d51875c74df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T03:20:43,282 INFO [master/0d51875c74df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T03:20:43,283 INFO [master/0d51875c74df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T03:20:43,283 INFO [master/0d51875c74df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T03:20:43,283 INFO [master/0d51875c74df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T03:20:43,284 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T03:20:43,285 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T03:20:43,292 DEBUG [master/0d51875c74df:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-23T03:20:43,292 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T03:20:43,293 INFO [master/0d51875c74df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d51875c74df,39215,1732332039198-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T03:20:43,390 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-23T03:20:43,391 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-23T03:20:43,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:43,404 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T03:20:43,404 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T03:20:43,417 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:43,426 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:43,435 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=0d51875c74df,39215,1732332039198 2024-11-23T03:20:43,448 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=148, ProcessCount=11, AvailableMemoryMB=5090 2024-11-23T03:20:43,459 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:20:43,462 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:20:43,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T03:20:43,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:20:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:20:43,478 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:20:43,478 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:43,480 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:20:43,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-23T03:20:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T03:20:43,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741837_1013 (size=960) 2024-11-23T03:20:43,493 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:20:43,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741838_1014 (size=53) 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 4dfbb59dd7b53f05c99615119ca9b6f4, disabling compactions & flushes 2024-11-23T03:20:43,502 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. after waiting 0 ms 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,502 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,502 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:43,504 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:20:43,505 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332043504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332043504"}]},"ts":"1732332043504"} 2024-11-23T03:20:43,507 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-23T03:20:43,509 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:20:43,509 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332043509"}]},"ts":"1732332043509"} 2024-11-23T03:20:43,512 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:20:43,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, ASSIGN}] 2024-11-23T03:20:43,518 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, ASSIGN 2024-11-23T03:20:43,520 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:20:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T03:20:43,670 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4dfbb59dd7b53f05c99615119ca9b6f4, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:43,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:20:43,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T03:20:43,828 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:43,834 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:43,834 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:20:43,835 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,835 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:20:43,835 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,835 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,837 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,841 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:20:43,841 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dfbb59dd7b53f05c99615119ca9b6f4 columnFamilyName A 2024-11-23T03:20:43,841 DEBUG [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:43,843 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(327): Store=4dfbb59dd7b53f05c99615119ca9b6f4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:43,843 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,845 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:20:43,845 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dfbb59dd7b53f05c99615119ca9b6f4 columnFamilyName B 2024-11-23T03:20:43,845 DEBUG [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:43,846 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(327): Store=4dfbb59dd7b53f05c99615119ca9b6f4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:43,846 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,848 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:20:43,849 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dfbb59dd7b53f05c99615119ca9b6f4 columnFamilyName C 2024-11-23T03:20:43,849 DEBUG [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:20:43,849 INFO [StoreOpener-4dfbb59dd7b53f05c99615119ca9b6f4-1 {}] regionserver.HStore(327): Store=4dfbb59dd7b53f05c99615119ca9b6f4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:20:43,850 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,851 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,852 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,854 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:20:43,856 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:43,859 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:20:43,860 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 4dfbb59dd7b53f05c99615119ca9b6f4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72513302, jitterRate=0.08053240180015564}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:20:43,861 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:43,863 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., pid=11, masterSystemTime=1732332043827 2024-11-23T03:20:43,866 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:43,866 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:43,867 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4dfbb59dd7b53f05c99615119ca9b6f4, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:20:43,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-23T03:20:43,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 in 196 msec 2024-11-23T03:20:43,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-23T03:20:43,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, ASSIGN in 357 msec 2024-11-23T03:20:43,877 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:20:43,877 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332043877"}]},"ts":"1732332043877"} 2024-11-23T03:20:43,880 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:20:43,883 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:20:43,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 410 msec 2024-11-23T03:20:44,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T03:20:44,098 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-23T03:20:44,103 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-23T03:20:44,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,109 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,111 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,114 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:20:44,116 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38042, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:20:44,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-23T03:20:44,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-23T03:20:44,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,133 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-23T03:20:44,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,138 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-23T03:20:44,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,144 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-23T03:20:44,147 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,148 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-23T03:20:44,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,153 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-23T03:20:44,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,157 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-23T03:20:44,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,161 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-23T03:20:44,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:20:44,173 DEBUG [hconnection-0x3bca7345-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,174 DEBUG [hconnection-0x176fe140-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,175 DEBUG [hconnection-0x21675e13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,177 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43842, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,177 DEBUG [hconnection-0x3b20eaa4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,178 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,178 DEBUG [hconnection-0x426356f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,178 DEBUG [hconnection-0x778c0741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,179 DEBUG [hconnection-0x4b800cbf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-11-23T03:20:44,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:44,181 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,185 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-23T03:20:44,188 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:44,190 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:44,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:44,196 DEBUG [hconnection-0x2697246c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,221 DEBUG [hconnection-0x6313e8f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:20:44,230 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43858, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,231 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,234 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,235 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,238 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:20:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:44,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:44,306 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T03:20:44,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:44,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,400 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:44,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:44,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
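The flush attempted above is driven by the admin flush request logged at 03:20:44,180 (Client=jenkins//172.17.0.2 flush TestAcidGuarantees), which the master stores as FlushTableProcedure pid=12 and fans out as FlushRegionProcedure pid=13. A minimal sketch of issuing that request through the HBase client Admin API, assuming the ZooKeeper quorum and port taken from the connection lines above and an illustrative wrapper class, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    // Sketch only: quorum host and client port copied from the ReadOnlyZKClient lines above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "61411");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master records this as "flush TestAcidGuarantees" and stores
      // FlushTableProcedure (pid=12), which spawns FlushRegionProcedure (pid=13).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Because the MemStoreFlusher has already started its own flush of the region when pid=13 arrives, the callable bails out with the "NOT flushing ... as already flushing" message and the IOException above, and the master keeps re-dispatching pid=13, as seen again further down.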
2024-11-23T03:20:44,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 is 50, key is test_row_0/A:col10/1732332044290/Put/seqid=0 2024-11-23T03:20:44,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332104465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332104466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332104471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332104481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332104472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:44,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741839_1015 (size=12001) 2024-11-23T03:20:44,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 2024-11-23T03:20:44,600 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:44,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:44,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332104620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332104621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332104622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332104622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332104623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/ce5f5634966a4a289a0dc6b7c05270f1 is 50, key is test_row_0/B:col10/1732332044290/Put/seqid=0 2024-11-23T03:20:44,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741840_1016 (size=12001) 2024-11-23T03:20:44,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/ce5f5634966a4a289a0dc6b7c05270f1 2024-11-23T03:20:44,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/8e6ebc119faa4199996681960e48c530 is 50, key is test_row_0/C:col10/1732332044290/Put/seqid=0 2024-11-23T03:20:44,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741841_1017 (size=12001) 2024-11-23T03:20:44,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/8e6ebc119faa4199996681960e48c530 2024-11-23T03:20:44,735 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 2024-11-23T03:20:44,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:20:44,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/ce5f5634966a4a289a0dc6b7c05270f1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1 2024-11-23T03:20:44,761 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:44,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:20:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/8e6ebc119faa4199996681960e48c530 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530 2024-11-23T03:20:44,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:44,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:44,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:44,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:20:44,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 498ms, sequenceid=13, compaction requested=false 2024-11-23T03:20:44,805 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T03:20:44,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:44,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:44,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:44,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:44,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ad2a190fdd640ff8b181fecf3b5fa6a is 50, key is test_row_0/A:col10/1732332044835/Put/seqid=0 2024-11-23T03:20:44,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332104852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332104856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741842_1018 (size=12001) 2024-11-23T03:20:44,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332104871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332104867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332104863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:44,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:44,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:44,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332104974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332104976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332104985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332104987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:44,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:44,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332104986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,089 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:45,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332105180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332105185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332105200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332105202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332105202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ad2a190fdd640ff8b181fecf3b5fa6a 2024-11-23T03:20:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:45,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/cc76d1ee635c4652a45c924273d48b31 is 50, key is test_row_0/B:col10/1732332044835/Put/seqid=0 2024-11-23T03:20:45,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741843_1019 (size=12001) 2024-11-23T03:20:45,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/cc76d1ee635c4652a45c924273d48b31 2024-11-23T03:20:45,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/eac5cffc7e154faeacb8aa8c48d937fb is 50, key is test_row_0/C:col10/1732332044835/Put/seqid=0 2024-11-23T03:20:45,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:45,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741844_1020 (size=12001) 2024-11-23T03:20:45,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/eac5cffc7e154faeacb8aa8c48d937fb 2024-11-23T03:20:45,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/4ad2a190fdd640ff8b181fecf3b5fa6a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a 2024-11-23T03:20:45,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:20:45,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/cc76d1ee635c4652a45c924273d48b31 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31 2024-11-23T03:20:45,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:20:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/eac5cffc7e154faeacb8aa8c48d937fb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb 2024-11-23T03:20:45,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:20:45,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 634ms, sequenceid=39, compaction requested=false 2024-11-23T03:20:45,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:45,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:45,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:45,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:45,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/76eb3d0511cf4d53a6cd5f32701034a9 is 50, key is test_row_0/A:col10/1732332045489/Put/seqid=0 2024-11-23T03:20:45,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741845_1021 (size=14341) 2024-11-23T03:20:45,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/76eb3d0511cf4d53a6cd5f32701034a9 2024-11-23T03:20:45,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0545c36863674ff3994b9b7b3cb596d7 is 50, key is test_row_0/B:col10/1732332045489/Put/seqid=0 2024-11-23T03:20:45,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:45,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741846_1022 (size=12001) 2024-11-23T03:20:45,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0545c36863674ff3994b9b7b3cb596d7 2024-11-23T03:20:45,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/fb715793c8f1438ea868148f81ce69f9 is 50, key is test_row_0/C:col10/1732332045489/Put/seqid=0 2024-11-23T03:20:45,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741847_1023 (size=12001) 2024-11-23T03:20:45,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332105588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332105594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332105648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332105648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332105649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
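The RegionTooBusyException entries show writes being rejected while the region's memstore is over its blocking limit; the server expects callers to back off and retry once the flush drains the memstore. The shipped HBase client normally performs these retries internally, so the explicit loop below is only an illustrative application-level sketch, assuming a single Put against the same row, table, and column family A used by the test and that the exception surfaces directly to the caller:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                      // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);    // region blocked on memstore pressure; wait and retry
          backoffMs *= 2;             // exponential backoff between attempts
        }
      }
    }
  }
}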
2024-11-23T03:20:45,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332105751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332105753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332105758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332105761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332105761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:45,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
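The "Over memstore limit=512.0 K" figure in these warnings is the per-region blocking threshold: the configured memstore flush size multiplied by the block multiplier. This test run evidently uses a very small flush size so the limit is reached quickly; the exact test configuration is not shown in this log, so the values below are arbitrary and only illustrate how the two settings combine:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed to disk (default is 128 MB; tiny here for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // New writes are blocked once the memstore reaches flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);   // 128 KB * 4 = 512 KB blocking limit
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit);
  }
}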
2024-11-23T03:20:45,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:45,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332105958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332105959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332105964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332105966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:45,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332105967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/fb715793c8f1438ea868148f81ce69f9 2024-11-23T03:20:46,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T03:20:46,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:46,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:46,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:46,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/76eb3d0511cf4d53a6cd5f32701034a9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9 2024-11-23T03:20:46,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9, entries=200, sequenceid=50, filesize=14.0 K 2024-11-23T03:20:46,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0545c36863674ff3994b9b7b3cb596d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7 2024-11-23T03:20:46,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7, entries=150, sequenceid=50, filesize=11.7 K 2024-11-23T03:20:46,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/fb715793c8f1438ea868148f81ce69f9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9 2024-11-23T03:20:46,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9, entries=150, sequenceid=50, filesize=11.7 K 2024-11-23T03:20:46,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 595ms, sequenceid=50, compaction requested=true 2024-11-23T03:20:46,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:46,096 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:46,096 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:46,097 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:46,101 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:46,102 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:46,103 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
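At this point the flush has committed three new HFiles from the region's .tmp directory into the A, B, and C family directories, and the flusher has queued all three stores for compaction. The committed store files live directly on HDFS and can be listed with the Hadoop FileSystem API; a small sketch, assuming the NameNode URI and region path copied from the log (in practice the region directory is discovered via hbase:meta rather than hard-coded):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34981"), conf);
    // Family directory "A" of the region, as it appears in the log above.
    Path family = new Path("/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/"
        + "data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A");
    for (FileStatus f : fs.listStatus(family)) {
      System.out.println(f.getPath().getName() + "  " + f.getLen() + " bytes");
    }
  }
}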
2024-11-23T03:20:46,103 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.2 K
2024-11-23T03:20:46,106 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ce5f5634966a4a289a0dc6b7c05270f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332044267
2024-11-23T03:20:46,106 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T03:20:46,106 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files)
2024-11-23T03:20:46,106 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.
2024-11-23T03:20:46,107 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cc76d1ee635c4652a45c924273d48b31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332044414
2024-11-23T03:20:46,107 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.4 K
2024-11-23T03:20:46,108 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0545c36863674ff3994b9b7b3cb596d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044863
2024-11-23T03:20:46,108 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ef5d4ef47f94d1e8efd464a3a56e0f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332044267
2024-11-23T03:20:46,110 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ad2a190fdd640ff8b181fecf3b5fa6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332044414
2024-11-23T03:20:46,111 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76eb3d0511cf4d53a6cd5f32701034a9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044859
2024-11-23T03:20:46,159 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#9 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-23T03:20:46,160 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/82f224aef2d0408bb31f44865479f20f is 50, key is test_row_0/A:col10/1732332045489/Put/seqid=0
2024-11-23T03:20:46,168 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#10 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T03:20:46,169 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/16b77ce786384720b72f10474718526a is 50, key is test_row_0/B:col10/1732332045489/Put/seqid=0
2024-11-23T03:20:46,181 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937
2024-11-23T03:20:46,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-23T03:20:46,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.
2024-11-23T03:20:46,184 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-11-23T03:20:46,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A
2024-11-23T03:20:46,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:20:46,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B
2024-11-23T03:20:46,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:20:46,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C
2024-11-23T03:20:46,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:20:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741848_1024 (size=12104)
2024-11-23T03:20:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/f5954b2605ab471d96ede3e8fd5d89f4 is 50, key is test_row_0/A:col10/1732332045595/Put/seqid=0
2024-11-23T03:20:46,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741849_1025 (size=12104)
2024-11-23T03:20:46,243 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/16b77ce786384720b72f10474718526a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/16b77ce786384720b72f10474718526a
2024-11-23T03:20:46,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741850_1026 (size=12001)
2024-11-23T03:20:46,263 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/f5954b2605ab471d96ede3e8fd5d89f4
2024-11-23T03:20:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4
2024-11-23T03:20:46,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing
2024-11-23T03:20:46,274 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 16b77ce786384720b72f10474718526a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T03:20:46,274 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4:
2024-11-23T03:20:46,275 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332046096; duration=0sec
2024-11-23T03:20:46,275 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T03:20:46,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B
2024-11-23T03:20:46,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T03:20:46,278 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T03:20:46,278 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files)
2024-11-23T03:20:46,278 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.
2024-11-23T03:20:46,279 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.2 K 2024-11-23T03:20:46,280 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e6ebc119faa4199996681960e48c530, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332044267 2024-11-23T03:20:46,282 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting eac5cffc7e154faeacb8aa8c48d937fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332044414 2024-11-23T03:20:46,283 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fb715793c8f1438ea868148f81ce69f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044863 2024-11-23T03:20:46,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/33d1b166ad7a43b88d310aa2f61710e5 is 50, key is test_row_0/B:col10/1732332045595/Put/seqid=0 2024-11-23T03:20:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332106279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332106277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332106295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:46,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332106298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332106298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741851_1027 (size=12001) 2024-11-23T03:20:46,312 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/33d1b166ad7a43b88d310aa2f61710e5 2024-11-23T03:20:46,327 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#13 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:46,328 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/cb29de31d75f4bda8a2af0ee6b06f9f9 is 50, key is test_row_0/C:col10/1732332045489/Put/seqid=0 2024-11-23T03:20:46,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/10f8a37975214c0ab0855847a84af4f9 is 50, key is test_row_0/C:col10/1732332045595/Put/seqid=0 2024-11-23T03:20:46,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741852_1028 (size=12104) 2024-11-23T03:20:46,372 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/cb29de31d75f4bda8a2af0ee6b06f9f9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cb29de31d75f4bda8a2af0ee6b06f9f9 2024-11-23T03:20:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741853_1029 (size=12001) 2024-11-23T03:20:46,378 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/10f8a37975214c0ab0855847a84af4f9 2024-11-23T03:20:46,388 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into cb29de31d75f4bda8a2af0ee6b06f9f9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:46,388 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:46,388 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332046096; duration=0sec 2024-11-23T03:20:46,389 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:46,389 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/f5954b2605ab471d96ede3e8fd5d89f4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4 2024-11-23T03:20:46,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332106401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332106401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332106406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332106408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,416 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:20:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332106408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/33d1b166ad7a43b88d310aa2f61710e5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5 2024-11-23T03:20:46,429 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:20:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/10f8a37975214c0ab0855847a84af4f9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9 2024-11-23T03:20:46,444 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:20:46,447 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 261ms, sequenceid=77, compaction requested=false 2024-11-23T03:20:46,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:46,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:46,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T03:20:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-23T03:20:46,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-23T03:20:46,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2590 sec 2024-11-23T03:20:46,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.2730 sec 2024-11-23T03:20:46,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:46,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:20:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:46,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,623 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/82f224aef2d0408bb31f44865479f20f as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/82f224aef2d0408bb31f44865479f20f 2024-11-23T03:20:46,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/38a69cc8d9734482acdd71124ccf9f7e is 50, key is test_row_0/A:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:46,642 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into 82f224aef2d0408bb31f44865479f20f(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:46,643 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:46,643 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332046093; duration=0sec 2024-11-23T03:20:46,644 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:46,645 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:46,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741854_1030 (size=12001) 2024-11-23T03:20:46,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/38a69cc8d9734482acdd71124ccf9f7e 2024-11-23T03:20:46,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332106654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332106656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332106656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332106660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332106660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/a1f7748647ac4445b4e441f75c60d6de is 50, key is test_row_0/B:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:46,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741855_1031 (size=12001) 2024-11-23T03:20:46,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/a1f7748647ac4445b4e441f75c60d6de 2024-11-23T03:20:46,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/60c6c042c5a54a149e47133a9df7eaf5 is 50, key is test_row_0/C:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:46,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741856_1032 (size=12001) 2024-11-23T03:20:46,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/60c6c042c5a54a149e47133a9df7eaf5 2024-11-23T03:20:46,750 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/38a69cc8d9734482acdd71124ccf9f7e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e 2024-11-23T03:20:46,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:20:46,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/a1f7748647ac4445b4e441f75c60d6de as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de 2024-11-23T03:20:46,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332106763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332106764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332106765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332106765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332106766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:46,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:20:46,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/60c6c042c5a54a149e47133a9df7eaf5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5 2024-11-23T03:20:46,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:20:46,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 185ms, sequenceid=90, compaction requested=true 2024-11-23T03:20:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:46,799 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:46,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-23T03:20:46,800 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:46,802 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:46,802 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:46,802 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:46,803 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/16b77ce786384720b72f10474718526a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.3 K 2024-11-23T03:20:46,803 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:46,803 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:46,803 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:46,803 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/82f224aef2d0408bb31f44865479f20f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.3 K 2024-11-23T03:20:46,804 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 16b77ce786384720b72f10474718526a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044863 2024-11-23T03:20:46,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82f224aef2d0408bb31f44865479f20f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044863 2024-11-23T03:20:46,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 33d1b166ad7a43b88d310aa2f61710e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332045580 2024-11-23T03:20:46,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5954b2605ab471d96ede3e8fd5d89f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332045580 2024-11-23T03:20:46,807 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a1f7748647ac4445b4e441f75c60d6de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:46,807 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38a69cc8d9734482acdd71124ccf9f7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:46,839 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:46,844 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:46,845 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/f0455fb3b41a4dc5a6cb795d85aed189 is 50, key is test_row_0/A:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:46,846 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0aa8c34383164e0f8705cbc088c19e14 is 50, key is test_row_0/B:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:46,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741857_1033 (size=12207) 2024-11-23T03:20:46,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741858_1034 (size=12207) 2024-11-23T03:20:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:46,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:20:46,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:46,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:46,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:46,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:46,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c8c6d747154e41849df6b6a82256b9fc is 50, key is test_row_0/A:col10/1732332046973/Put/seqid=0 2024-11-23T03:20:47,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741859_1035 (size=12001) 2024-11-23T03:20:47,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332106989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332106989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332107047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332107048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332107048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332107152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332107152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332107154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332107155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332107156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,244 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:20:47,290 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0aa8c34383164e0f8705cbc088c19e14 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0aa8c34383164e0f8705cbc088c19e14 2024-11-23T03:20:47,306 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/f0455fb3b41a4dc5a6cb795d85aed189 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f0455fb3b41a4dc5a6cb795d85aed189 2024-11-23T03:20:47,311 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 0aa8c34383164e0f8705cbc088c19e14(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:47,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:47,311 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332046799; duration=0sec 2024-11-23T03:20:47,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:47,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:47,312 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:47,316 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:47,316 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:47,317 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:47,317 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cb29de31d75f4bda8a2af0ee6b06f9f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.3 K 2024-11-23T03:20:47,318 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cb29de31d75f4bda8a2af0ee6b06f9f9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332044863 2024-11-23T03:20:47,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 10f8a37975214c0ab0855847a84af4f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332045580 2024-11-23T03:20:47,321 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into f0455fb3b41a4dc5a6cb795d85aed189(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:47,321 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:47,321 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332046799; duration=0sec 2024-11-23T03:20:47,321 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:47,321 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:47,322 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 60c6c042c5a54a149e47133a9df7eaf5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:47,337 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T03:20:47,339 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-23T03:20:47,346 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:47,347 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/ae178b8c324d43a1a50789f054bd7b13 is 50, key is test_row_0/C:col10/1732332046293/Put/seqid=0 2024-11-23T03:20:47,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332107358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332107359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332107362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332107363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332107371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741860_1036 (size=12207) 2024-11-23T03:20:47,389 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/ae178b8c324d43a1a50789f054bd7b13 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ae178b8c324d43a1a50789f054bd7b13 2024-11-23T03:20:47,402 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into ae178b8c324d43a1a50789f054bd7b13(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:47,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:47,403 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332046800; duration=0sec 2024-11-23T03:20:47,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:47,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:47,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c8c6d747154e41849df6b6a82256b9fc 2024-11-23T03:20:47,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/4ee84687c93247c8bbde6cedf390936f is 50, key is test_row_0/B:col10/1732332046973/Put/seqid=0 2024-11-23T03:20:47,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741861_1037 (size=12001) 2024-11-23T03:20:47,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/4ee84687c93247c8bbde6cedf390936f 2024-11-23T03:20:47,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/33ff9115b247486eae5b1224bb0e5af5 is 50, key is test_row_0/C:col10/1732332046973/Put/seqid=0 2024-11-23T03:20:47,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741862_1038 (size=12001) 2024-11-23T03:20:47,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/33ff9115b247486eae5b1224bb0e5af5 2024-11-23T03:20:47,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c8c6d747154e41849df6b6a82256b9fc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc 2024-11-23T03:20:47,516 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc, entries=150, sequenceid=117, filesize=11.7 K 2024-11-23T03:20:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/4ee84687c93247c8bbde6cedf390936f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f 2024-11-23T03:20:47,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f, entries=150, sequenceid=117, filesize=11.7 K 2024-11-23T03:20:47,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/33ff9115b247486eae5b1224bb0e5af5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5 2024-11-23T03:20:47,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5, entries=150, sequenceid=117, filesize=11.7 K 2024-11-23T03:20:47,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 571ms, sequenceid=117, compaction requested=false 2024-11-23T03:20:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:47,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T03:20:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:47,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:47,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:47,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:47,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:47,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:47,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 
2024-11-23T03:20:47,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/03c349c5e7024a068e5d95d4573f6e9d is 50, key is test_row_0/A:col10/1732332046994/Put/seqid=0 2024-11-23T03:20:47,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741863_1039 (size=14391) 2024-11-23T03:20:47,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/03c349c5e7024a068e5d95d4573f6e9d 2024-11-23T03:20:47,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332107713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332107713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332107714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332107717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332107717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/27fb17000dcb49d4be52d7437ed124e7 is 50, key is test_row_0/B:col10/1732332046994/Put/seqid=0 2024-11-23T03:20:47,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741864_1040 (size=12051) 2024-11-23T03:20:47,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/27fb17000dcb49d4be52d7437ed124e7 2024-11-23T03:20:47,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/24e206f24f17414fa8a8c45d19c1029a is 50, key is test_row_0/C:col10/1732332046994/Put/seqid=0 2024-11-23T03:20:47,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741865_1041 (size=12051) 2024-11-23T03:20:47,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332107823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332107823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332107824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332107827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:47,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:47,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332107827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332108029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332108030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332108030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332108033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332108033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/24e206f24f17414fa8a8c45d19c1029a 2024-11-23T03:20:48,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/03c349c5e7024a068e5d95d4573f6e9d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d 2024-11-23T03:20:48,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d, entries=200, sequenceid=132, filesize=14.1 K 2024-11-23T03:20:48,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/27fb17000dcb49d4be52d7437ed124e7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7 2024-11-23T03:20:48,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7, entries=150, sequenceid=132, filesize=11.8 K 2024-11-23T03:20:48,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/24e206f24f17414fa8a8c45d19c1029a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a 2024-11-23T03:20:48,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a, entries=150, sequenceid=132, filesize=11.8 K 2024-11-23T03:20:48,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 594ms, sequenceid=132, compaction requested=true 2024-11-23T03:20:48,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:48,264 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:48,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:48,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:48,265 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:48,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:48,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:48,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:48,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:48,266 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:48,267 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor 
compaction (all files) 2024-11-23T03:20:48,267 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:48,267 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:48,267 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f0455fb3b41a4dc5a6cb795d85aed189, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.7 K 2024-11-23T03:20:48,267 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:48,267 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:48,267 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0aa8c34383164e0f8705cbc088c19e14, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.4 K 2024-11-23T03:20:48,268 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0455fb3b41a4dc5a6cb795d85aed189, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:48,268 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aa8c34383164e0f8705cbc088c19e14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:48,269 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8c6d747154e41849df6b6a82256b9fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332046652 2024-11-23T03:20:48,269 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ee84687c93247c8bbde6cedf390936f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332046652 2024-11-23T03:20:48,270 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03c349c5e7024a068e5d95d4573f6e9d, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732332046982 2024-11-23T03:20:48,270 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 27fb17000dcb49d4be52d7437ed124e7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732332046982 2024-11-23T03:20:48,286 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#27 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:48,287 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/ae2cd3caaa0f471ab355abf49fe78280 is 50, key is test_row_0/A:col10/1732332046994/Put/seqid=0 2024-11-23T03:20:48,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T03:20:48,307 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:48,308 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-23T03:20:48,308 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/5819b47b49024c34846d1d9c9ff06459 is 50, key is test_row_0/B:col10/1732332046994/Put/seqid=0 2024-11-23T03:20:48,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:48,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-23T03:20:48,314 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:48,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:48,316 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:48,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:48,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:48,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:48,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741866_1042 (size=12359) 2024-11-23T03:20:48,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332108350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332108350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332108352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332108354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332108355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5aa2cf65a414bdaa35efe916a9bb0b1 is 50, key is test_row_0/A:col10/1732332048338/Put/seqid=0 2024-11-23T03:20:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741867_1043 (size=12359) 2024-11-23T03:20:48,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741868_1044 (size=16931) 2024-11-23T03:20:48,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5aa2cf65a414bdaa35efe916a9bb0b1 2024-11-23T03:20:48,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/f13068eaac04428caf48e417d29f7a94 is 50, key is test_row_0/B:col10/1732332048338/Put/seqid=0 2024-11-23T03:20:48,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741869_1045 (size=12151) 2024-11-23T03:20:48,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/f13068eaac04428caf48e417d29f7a94 2024-11-23T03:20:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:48,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/d28d95cae5454630a89dac482e971ab1 is 50, key is test_row_0/C:col10/1732332048338/Put/seqid=0 2024-11-23T03:20:48,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38243 is added to blk_1073741870_1046 (size=12151) 2024-11-23T03:20:48,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/d28d95cae5454630a89dac482e971ab1 2024-11-23T03:20:48,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5aa2cf65a414bdaa35efe916a9bb0b1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1 2024-11-23T03:20:48,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332108457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332108457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332108457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332108458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332108460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1, entries=250, sequenceid=159, filesize=16.5 K 2024-11-23T03:20:48,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T03:20:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:48,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/f13068eaac04428caf48e417d29f7a94 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94 2024-11-23T03:20:48,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94, entries=150, sequenceid=159, filesize=11.9 K 2024-11-23T03:20:48,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/d28d95cae5454630a89dac482e971ab1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1 2024-11-23T03:20:48,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1, entries=150, sequenceid=159, filesize=11.9 K 2024-11-23T03:20:48,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 159ms, sequenceid=159, compaction requested=true 2024-11-23T03:20:48,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:48,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-23T03:20:48,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:48,625 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T03:20:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:48,626 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T03:20:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:48,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:48,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:48,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5faa927d15c42b882f80b68d5680348 is 50, key is test_row_0/A:col10/1732332048352/Put/seqid=0 2024-11-23T03:20:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741871_1047 (size=12151) 2024-11-23T03:20:48,655 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5faa927d15c42b882f80b68d5680348 2024-11-23T03:20:48,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:48,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:48,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/59f9c8eee2ff4ba39c61ea9f43b8b1df is 50, key is test_row_0/B:col10/1732332048352/Put/seqid=0 2024-11-23T03:20:48,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741872_1048 (size=12151) 2024-11-23T03:20:48,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332108697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332108701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332108701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332108703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332108703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,763 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/ae2cd3caaa0f471ab355abf49fe78280 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/ae2cd3caaa0f471ab355abf49fe78280 2024-11-23T03:20:48,777 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/5819b47b49024c34846d1d9c9ff06459 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/5819b47b49024c34846d1d9c9ff06459 2024-11-23T03:20:48,789 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into ae2cd3caaa0f471ab355abf49fe78280(size=12.1 K), total size for store is 28.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:48,789 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:48,789 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332048264; duration=0sec 2024-11-23T03:20:48,789 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-23T03:20:48,789 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:48,790 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:48,790 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:48,791 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 5819b47b49024c34846d1d9c9ff06459(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:48,792 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:48,793 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332048265; duration=0sec 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-23T03:20:48,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has 
selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:20:48,794 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:48,794 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:20:48,794 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. because compaction request was cancelled 2024-11-23T03:20:48,794 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:48,794 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ae178b8c324d43a1a50789f054bd7b13, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=47.3 K 2024-11-23T03:20:48,794 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae178b8c324d43a1a50789f054bd7b13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332046273 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
because compaction request was cancelled 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:20:48,795 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33ff9115b247486eae5b1224bb0e5af5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332046652 2024-11-23T03:20:48,797 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:20:48,797 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:20:48,797 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. because compaction request was cancelled 2024-11-23T03:20:48,797 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:48,797 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24e206f24f17414fa8a8c45d19c1029a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732332046982 2024-11-23T03:20:48,798 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d28d95cae5454630a89dac482e971ab1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332047714 2024-11-23T03:20:48,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332108807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332108807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332108808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332108810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:48,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332108810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:48,821 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:48,822 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/27657c7dc72d49f4826ba3568efc2a56 is 50, key is test_row_0/C:col10/1732332048338/Put/seqid=0 2024-11-23T03:20:48,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741873_1049 (size=12493) 2024-11-23T03:20:48,841 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/27657c7dc72d49f4826ba3568efc2a56 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27657c7dc72d49f4826ba3568efc2a56 2024-11-23T03:20:48,855 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into 27657c7dc72d49f4826ba3568efc2a56(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:48,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:48,856 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=12, startTime=1732332048501; duration=0sec 2024-11-23T03:20:48,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:48,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:49,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332109010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332109012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332109012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332109013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332109015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,089 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/59f9c8eee2ff4ba39c61ea9f43b8b1df 2024-11-23T03:20:49,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/1f3211fc15d7478da6e7abda2aa58297 is 50, key is test_row_0/C:col10/1732332048352/Put/seqid=0 2024-11-23T03:20:49,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741874_1050 (size=12151) 2024-11-23T03:20:49,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332109314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332109318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332109319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332109319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332109319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:49,533 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/1f3211fc15d7478da6e7abda2aa58297 2024-11-23T03:20:49,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d5faa927d15c42b882f80b68d5680348 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348 2024-11-23T03:20:49,554 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:20:49,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/59f9c8eee2ff4ba39c61ea9f43b8b1df as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df 2024-11-23T03:20:49,568 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:20:49,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/1f3211fc15d7478da6e7abda2aa58297 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297 2024-11-23T03:20:49,579 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:20:49,580 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 954ms, sequenceid=169, compaction requested=true 2024-11-23T03:20:49,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:49,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
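The run of WARN/DEBUG pairs above shows writes to region 4dfbb59dd7b53f05c99615119ca9b6f4 being rejected with RegionTooBusyException while its memstore sits above the 512.0 K blocking limit (by default that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the small value here is presumably a deliberately low flush size in this test's configuration); the flush that follows drains the memstore and commits the .tmp HFiles into the A, B and C store directories. The sketch below is only an illustration of how a caller could back off and retry such a rejected put — the stock HBase client already retries RegionTooBusyException internally, so an explicit loop like this is rarely needed. The row and qualifier (test_row_0, col10) come from the log; the value, attempt count, and backoff values are made up for the example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Row and qualifier taken from the log above; the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      long backoffMs = 100;                      // illustrative starting pause
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                        // write accepted
          break;
        } catch (RegionTooBusyException busy) {
          // The region is over its memstore blocking limit; give the flush
          // some time to drain the memstore before trying again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                        // simple exponential backoff
        }
      }
    }
  }
}
```

Exponential backoff fits the server-side behaviour seen here: the region only accepts writes again once the in-flight flush has brought the memstore back under the blocking limit.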
2024-11-23T03:20:49,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-23T03:20:49,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-23T03:20:49,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-23T03:20:49,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2660 sec 2024-11-23T03:20:49,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.2750 sec 2024-11-23T03:20:49,681 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T03:20:49,681 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T03:20:49,685 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-23T03:20:49,685 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-23T03:20:49,687 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T03:20:49,687 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T03:20:49,688 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T03:20:49,688 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T03:20:49,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T03:20:49,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T03:20:49,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:49,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:49,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/11997243c06240498fe3a055a456d09b is 50, key is test_row_0/A:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:49,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332109831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332109831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332109834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332109835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332109835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741875_1051 (size=14541) 2024-11-23T03:20:49,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/11997243c06240498fe3a055a456d09b 2024-11-23T03:20:49,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3ddf872800f4483090a9fa9b8826b1f9 is 50, key is test_row_0/B:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741876_1052 (size=12151) 2024-11-23T03:20:49,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3ddf872800f4483090a9fa9b8826b1f9 2024-11-23T03:20:49,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/27754ca6e8b140bfa4ff2d48e4fda9d7 is 50, key is test_row_0/C:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741877_1053 (size=12151) 2024-11-23T03:20:49,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/27754ca6e8b140bfa4ff2d48e4fda9d7 2024-11-23T03:20:49,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/11997243c06240498fe3a055a456d09b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b 2024-11-23T03:20:49,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332109938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332109941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332109942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332109942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:49,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b, entries=200, sequenceid=200, filesize=14.2 K 2024-11-23T03:20:49,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3ddf872800f4483090a9fa9b8826b1f9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9 2024-11-23T03:20:49,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9, entries=150, sequenceid=200, filesize=11.9 K 2024-11-23T03:20:49,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/27754ca6e8b140bfa4ff2d48e4fda9d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7 2024-11-23T03:20:49,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7, entries=150, sequenceid=200, filesize=11.9 K 2024-11-23T03:20:49,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 148ms, sequenceid=200, 
compaction requested=true 2024-11-23T03:20:49,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:49,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:49,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:49,971 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:49,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:49,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:49,973 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55982 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:49,974 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:49,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:49,974 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:49,974 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:49,974 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:49,974 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
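Just above, ExploringCompactionPolicy reports selecting 4 files of 55982 bytes for the A store (and 48812 bytes for B) after considering 3 permutations, and HStore then starts a minor compaction over all four. The heart of that selection is a ratio test: a candidate set is acceptable only if no single file is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files, so one oversized file cannot dominate the rewrite. The snippet below is a simplified stand-in for that check, not HBase's actual implementation; the class and method names are invented, and the file sizes are only approximations of the 12.1 K / 16.5 K / 11.9 K / 14.2 K files listed above.

```java
import java.util.List;

/** Simplified stand-in for the ratio test used when exploring compaction candidates. */
public class CompactionRatioSketch {

  /**
   * A candidate selection passes when every file is at most {@code ratio}
   * times the combined size of the other files in the selection.
   */
  static boolean isValidSelection(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;   // one file dominates the selection; reject it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the four A-store files chosen above (total ~55982 bytes).
    List<Long> aStoreFiles = List.of(12_390L, 16_900L, 14_541L, 12_151L);
    System.out.println(isValidSelection(aStoreFiles, 1.2));   // true
  }
}
```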
2024-11-23T03:20:49,974 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/ae2cd3caaa0f471ab355abf49fe78280, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=54.7 K 2024-11-23T03:20:49,974 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/5819b47b49024c34846d1d9c9ff06459, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=47.7 K 2024-11-23T03:20:49,975 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5819b47b49024c34846d1d9c9ff06459, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732332046982 2024-11-23T03:20:49,975 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae2cd3caaa0f471ab355abf49fe78280, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732332046982 2024-11-23T03:20:49,976 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f13068eaac04428caf48e417d29f7a94, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332047714 2024-11-23T03:20:49,976 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5aa2cf65a414bdaa35efe916a9bb0b1, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332047708 2024-11-23T03:20:49,977 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 59f9c8eee2ff4ba39c61ea9f43b8b1df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732332048347 2024-11-23T03:20:49,977 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
d5faa927d15c42b882f80b68d5680348, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732332048347 2024-11-23T03:20:49,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ddf872800f4483090a9fa9b8826b1f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048700 2024-11-23T03:20:49,979 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11997243c06240498fe3a055a456d09b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048690 2024-11-23T03:20:50,003 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:50,003 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:50,004 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/176acb5738fa467c871427606e7b04da is 50, key is test_row_0/A:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:50,004 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/4ca3da6585904f968bb11e09884955e7 is 50, key is test_row_0/B:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:50,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741878_1054 (size=12595) 2024-11-23T03:20:50,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741879_1055 (size=12595) 2024-11-23T03:20:50,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:50,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T03:20:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:50,152 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:50,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/56d027f43ba94659a0271c07b0691f1a is 50, key is test_row_0/A:col10/1732332050146/Put/seqid=0 2024-11-23T03:20:50,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332110183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332110185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332110185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332110186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741880_1056 (size=12151) 2024-11-23T03:20:50,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332110288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332110291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332110292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332110293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T03:20:50,426 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-23T03:20:50,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-23T03:20:50,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:50,431 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:50,440 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:50,441 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:50,444 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/176acb5738fa467c871427606e7b04da as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/176acb5738fa467c871427606e7b04da 2024-11-23T03:20:50,449 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/4ca3da6585904f968bb11e09884955e7 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ca3da6585904f968bb11e09884955e7 2024-11-23T03:20:50,463 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 4ca3da6585904f968bb11e09884955e7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:50,463 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:50,464 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=12, startTime=1732332049971; duration=0sec 2024-11-23T03:20:50,465 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:50,465 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:50,464 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into 176acb5738fa467c871427606e7b04da(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:50,465 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:50,466 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=12, startTime=1732332049971; duration=0sec 2024-11-23T03:20:50,466 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:50,466 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:50,466 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:50,468 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:50,468 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:50,468 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,468 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27657c7dc72d49f4826ba3568efc2a56, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=35.9 K 2024-11-23T03:20:50,469 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27657c7dc72d49f4826ba3568efc2a56, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332047714 2024-11-23T03:20:50,469 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f3211fc15d7478da6e7abda2aa58297, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732332048347 2024-11-23T03:20:50,470 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27754ca6e8b140bfa4ff2d48e4fda9d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048700 2024-11-23T03:20:50,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332110494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332110494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332110496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332110498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,505 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:50,506 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/1ebdcfe3f1214b1baf90173c800a10a0 is 50, key is test_row_0/C:col10/1732332048700/Put/seqid=0 2024-11-23T03:20:50,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741881_1057 (size=12595) 2024-11-23T03:20:50,523 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/1ebdcfe3f1214b1baf90173c800a10a0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1ebdcfe3f1214b1baf90173c800a10a0 2024-11-23T03:20:50,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:50,534 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into 1ebdcfe3f1214b1baf90173c800a10a0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:50,534 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:50,534 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332049974; duration=0sec 2024-11-23T03:20:50,534 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:50,534 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:50,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/56d027f43ba94659a0271c07b0691f1a 2024-11-23T03:20:50,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:50,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:50,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:50,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:50,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:50,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/b8c27eb5cdbb477896838fad03c5516e is 50, key is test_row_0/B:col10/1732332050146/Put/seqid=0 2024-11-23T03:20:50,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741882_1058 (size=12151) 2024-11-23T03:20:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:50,748 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:50,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:50,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:50,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:50,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332110797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332110799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332110800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332110803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:50,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332110839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:50,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:50,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:50,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:50,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:50,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:50,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/b8c27eb5cdbb477896838fad03c5516e 2024-11-23T03:20:51,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/59deb629ad2546d7b8df6d3b642684cb is 50, key is test_row_0/C:col10/1732332050146/Put/seqid=0 2024-11-23T03:20:51,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741883_1059 (size=12151) 2024-11-23T03:20:51,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:51,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:51,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,212 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:51,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:51,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332111303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:51,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332111306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:51,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332111307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:51,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332111308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,366 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:51,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:51,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/59deb629ad2546d7b8df6d3b642684cb 2024-11-23T03:20:51,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/56d027f43ba94659a0271c07b0691f1a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a 2024-11-23T03:20:51,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a, entries=150, sequenceid=213, filesize=11.9 K 2024-11-23T03:20:51,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/b8c27eb5cdbb477896838fad03c5516e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e 2024-11-23T03:20:51,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e, entries=150, sequenceid=213, filesize=11.9 K 2024-11-23T03:20:51,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/59deb629ad2546d7b8df6d3b642684cb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb 2024-11-23T03:20:51,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb, entries=150, sequenceid=213, filesize=11.9 K 2024-11-23T03:20:51,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 1329ms, sequenceid=213, compaction requested=false 2024-11-23T03:20:51,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:51,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
0d51875c74df,34141,1732332039937 2024-11-23T03:20:51,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T03:20:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:51,522 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:51,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:51,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c179245ee63f415eb8d1c3d9a89ec3b2 is 50, key is test_row_0/A:col10/1732332050185/Put/seqid=0 2024-11-23T03:20:51,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741884_1060 (size=12151) 2024-11-23T03:20:51,938 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c179245ee63f415eb8d1c3d9a89ec3b2 2024-11-23T03:20:51,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/887b3ee226ba4a838041c59ae78ab1a5 is 50, key is test_row_0/B:col10/1732332050185/Put/seqid=0 2024-11-23T03:20:51,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741885_1061 (size=12151) 2024-11-23T03:20:51,998 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/887b3ee226ba4a838041c59ae78ab1a5 2024-11-23T03:20:52,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/374adeb2237e4264a44f8e79201c5a51 is 50, key is test_row_0/C:col10/1732332050185/Put/seqid=0 2024-11-23T03:20:52,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741886_1062 (size=12151) 2024-11-23T03:20:52,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:52,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:52,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332112322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332112324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332112325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332112326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332112427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332112428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332112428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332112429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,452 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/374adeb2237e4264a44f8e79201c5a51 2024-11-23T03:20:52,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/c179245ee63f415eb8d1c3d9a89ec3b2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2 2024-11-23T03:20:52,466 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2, entries=150, sequenceid=239, filesize=11.9 K 2024-11-23T03:20:52,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/887b3ee226ba4a838041c59ae78ab1a5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5 2024-11-23T03:20:52,474 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5, entries=150, sequenceid=239, filesize=11.9 K 2024-11-23T03:20:52,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/374adeb2237e4264a44f8e79201c5a51 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51 2024-11-23T03:20:52,482 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51, entries=150, sequenceid=239, filesize=11.9 K 2024-11-23T03:20:52,483 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 961ms, sequenceid=239, compaction requested=true 2024-11-23T03:20:52,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:52,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:52,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T03:20:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-23T03:20:52,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-23T03:20:52,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0450 sec 2024-11-23T03:20:52,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.0590 sec 2024-11-23T03:20:52,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T03:20:52,538 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-23T03:20:52,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:52,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-23T03:20:52,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:52,543 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:52,544 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:52,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:52,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:20:52,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:52,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:52,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:52,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-23T03:20:52,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:52,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:52,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/01d78a6e5441471389a141bf532d3e86 is 50, key is test_row_0/A:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:52,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332112659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741887_1063 (size=12151) 2024-11-23T03:20:52,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/01d78a6e5441471389a141bf532d3e86 2024-11-23T03:20:52,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332112662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332112665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332112664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c76e5caae0d4160993a0f01b9d10822 is 50, key is test_row_0/B:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:52,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:52,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:52,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:52,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:52,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:52,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:52,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741888_1064 (size=12151) 2024-11-23T03:20:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332112767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332112771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332112771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332112772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:52,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332112843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,846 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:20:52,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:52,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:52,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:52,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:52,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:52,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:52,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:52,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332112972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332112974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332112976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:52,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:52,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332112978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,004 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c76e5caae0d4160993a0f01b9d10822 2024-11-23T03:20:53,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/30d5425728204803a946ccd7b98cf1bc is 50, key is test_row_0/C:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:53,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741889_1065 (size=12151) 2024-11-23T03:20:53,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:53,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332113276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332113277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332113280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332113281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,314 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,468 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:53,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/30d5425728204803a946ccd7b98cf1bc 2024-11-23T03:20:53,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/01d78a6e5441471389a141bf532d3e86 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86 2024-11-23T03:20:53,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86, entries=150, sequenceid=255, filesize=11.9 K 2024-11-23T03:20:53,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c76e5caae0d4160993a0f01b9d10822 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822 2024-11-23T03:20:53,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822, entries=150, sequenceid=255, filesize=11.9 K 2024-11-23T03:20:53,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/30d5425728204803a946ccd7b98cf1bc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc 2024-11-23T03:20:53,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc, entries=150, sequenceid=255, filesize=11.9 K 2024-11-23T03:20:53,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 954ms, sequenceid=255, compaction requested=true 2024-11-23T03:20:53,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:53,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:53,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:53,588 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:53,588 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:53,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:53,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:53,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:53,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:53,591 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:53,591 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:53,591 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:53,591 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:53,591 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:53,591 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:53,591 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/176acb5738fa467c871427606e7b04da, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=47.9 K 2024-11-23T03:20:53,591 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ca3da6585904f968bb11e09884955e7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=47.9 K 2024-11-23T03:20:53,592 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ca3da6585904f968bb11e09884955e7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048700 2024-11-23T03:20:53,592 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 176acb5738fa467c871427606e7b04da, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048700 2024-11-23T03:20:53,592 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b8c27eb5cdbb477896838fad03c5516e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332050146 2024-11-23T03:20:53,593 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56d027f43ba94659a0271c07b0691f1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332050146 2024-11-23T03:20:53,593 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 887b3ee226ba4a838041c59ae78ab1a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732332050175 2024-11-23T03:20:53,594 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
c179245ee63f415eb8d1c3d9a89ec3b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732332050175 2024-11-23T03:20:53,594 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c76e5caae0d4160993a0f01b9d10822, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:53,595 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01d78a6e5441471389a141bf532d3e86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:53,613 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:53,614 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/8cb37788d4ab493da12fe2c12520a340 is 50, key is test_row_0/B:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:53,618 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:53,619 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/b6016a06c418448890e6c47f5e444545 is 50, key is test_row_0/A:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:53,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T03:20:53,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:53,624 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:20:53,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:53,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/7f60ed08588a4dada62cbf8dbe423b0d is 50, key is test_row_0/A:col10/1732332052663/Put/seqid=0 2024-11-23T03:20:53,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741890_1066 (size=12731) 2024-11-23T03:20:53,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741891_1067 (size=12731) 2024-11-23T03:20:53,646 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/8cb37788d4ab493da12fe2c12520a340 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8cb37788d4ab493da12fe2c12520a340 2024-11-23T03:20:53,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:53,659 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/b6016a06c418448890e6c47f5e444545 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b6016a06c418448890e6c47f5e444545 2024-11-23T03:20:53,663 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 8cb37788d4ab493da12fe2c12520a340(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:53,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:53,663 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=12, startTime=1732332053588; duration=0sec 2024-11-23T03:20:53,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:53,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:53,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:20:53,666 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:20:53,666 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:53,666 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:53,666 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1ebdcfe3f1214b1baf90173c800a10a0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=47.9 K 2024-11-23T03:20:53,667 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ebdcfe3f1214b1baf90173c800a10a0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732332048700 2024-11-23T03:20:53,668 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 59deb629ad2546d7b8df6d3b642684cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332050146 2024-11-23T03:20:53,668 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 374adeb2237e4264a44f8e79201c5a51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732332050175 2024-11-23T03:20:53,669 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into b6016a06c418448890e6c47f5e444545(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:53,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:53,669 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=12, startTime=1732332053588; duration=0sec 2024-11-23T03:20:53,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:53,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:53,669 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 30d5425728204803a946ccd7b98cf1bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:53,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741892_1068 (size=12301) 2024-11-23T03:20:53,680 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:53,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/097670f156f749f3884f40f70d516c71 is 50, key is test_row_0/C:col10/1732332052633/Put/seqid=0 2024-11-23T03:20:53,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741893_1069 (size=12731) 2024-11-23T03:20:53,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:53,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332113794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332113794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332113795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332113796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332113900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332113900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332113901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:53,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332113901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,072 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/7f60ed08588a4dada62cbf8dbe423b0d 2024-11-23T03:20:54,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/22cad17295684cf3a6ab624c2e3586ad is 50, key is test_row_0/B:col10/1732332052663/Put/seqid=0 2024-11-23T03:20:54,092 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/097670f156f749f3884f40f70d516c71 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/097670f156f749f3884f40f70d516c71 2024-11-23T03:20:54,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741894_1070 (size=12301) 2024-11-23T03:20:54,101 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into 097670f156f749f3884f40f70d516c71(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:54,101 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:54,101 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=12, startTime=1732332053589; duration=0sec 2024-11-23T03:20:54,101 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:54,101 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:54,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332114106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332114106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332114106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332114107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332114409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332114410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332114410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332114413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,495 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/22cad17295684cf3a6ab624c2e3586ad 2024-11-23T03:20:54,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/15b44c0879314c8d838c38890f8abc0d is 50, key is test_row_0/C:col10/1732332052663/Put/seqid=0 2024-11-23T03:20:54,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741895_1071 (size=12301) 2024-11-23T03:20:54,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:54,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/15b44c0879314c8d838c38890f8abc0d 2024-11-23T03:20:54,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332114913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332114913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332114914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:54,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332114915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:54,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/7f60ed08588a4dada62cbf8dbe423b0d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d 2024-11-23T03:20:54,933 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d, entries=150, sequenceid=275, filesize=12.0 K 2024-11-23T03:20:54,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/22cad17295684cf3a6ab624c2e3586ad as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad 2024-11-23T03:20:54,951 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad, entries=150, sequenceid=275, filesize=12.0 K 2024-11-23T03:20:54,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/15b44c0879314c8d838c38890f8abc0d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d 2024-11-23T03:20:54,959 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d, entries=150, sequenceid=275, filesize=12.0 K 2024-11-23T03:20:54,960 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 1336ms, sequenceid=275, compaction requested=false 2024-11-23T03:20:54,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:54,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:54,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-23T03:20:54,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-23T03:20:54,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T03:20:54,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4180 sec 2024-11-23T03:20:54,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.4260 sec 2024-11-23T03:20:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:55,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:55,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:55,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/331a9849288c4b0396055baffc4d7297 is 50, key is test_row_0/A:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:55,937 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741896_1072 (size=14741) 2024-11-23T03:20:55,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332115934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:55,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:55,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332115965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:55,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332115965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:55,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332115965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332116067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332116069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332116070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332116070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332116270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332116272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332116272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332116273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/331a9849288c4b0396055baffc4d7297 2024-11-23T03:20:56,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3f458f015c724d4b9c3c6ef269f594b8 is 50, key is test_row_0/B:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:56,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741897_1073 (size=12301) 2024-11-23T03:20:56,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332116572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332116575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332116576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332116576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T03:20:56,648 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-23T03:20:56,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:56,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-23T03:20:56,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T03:20:56,652 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:56,653 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:56,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:56,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T03:20:56,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3f458f015c724d4b9c3c6ef269f594b8 2024-11-23T03:20:56,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/c303347dbe11461f8d6932e53d616e5d is 50, key is test_row_0/C:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:56,769 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741898_1074 (size=12301) 2024-11-23T03:20:56,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/c303347dbe11461f8d6932e53d616e5d 2024-11-23T03:20:56,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/331a9849288c4b0396055baffc4d7297 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297 2024-11-23T03:20:56,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297, entries=200, sequenceid=295, filesize=14.4 K 2024-11-23T03:20:56,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3f458f015c724d4b9c3c6ef269f594b8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8 2024-11-23T03:20:56,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8, entries=150, sequenceid=295, filesize=12.0 K 2024-11-23T03:20:56,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/c303347dbe11461f8d6932e53d616e5d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d 2024-11-23T03:20:56,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d, entries=150, sequenceid=295, filesize=12.0 K 2024-11-23T03:20:56,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 880ms, sequenceid=295, compaction requested=true 2024-11-23T03:20:56,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:56,800 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-23T03:20:56,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:56,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:56,800 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:56,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:56,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:56,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:56,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:56,801 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:56,802 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:56,802 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:56,802 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b6016a06c418448890e6c47f5e444545, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=38.8 K 2024-11-23T03:20:56,802 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6016a06c418448890e6c47f5e444545, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:56,803 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:56,803 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:56,803 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:56,803 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8cb37788d4ab493da12fe2c12520a340, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.5 K 2024-11-23T03:20:56,804 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f60ed08588a4dada62cbf8dbe423b0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732332052660 2024-11-23T03:20:56,804 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cb37788d4ab493da12fe2c12520a340, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:56,804 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 22cad17295684cf3a6ab624c2e3586ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732332052660 2024-11-23T03:20:56,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 331a9849288c4b0396055baffc4d7297, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:56,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f458f015c724d4b9c3c6ef269f594b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:56,806 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T03:20:56,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:56,806 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:56,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:56,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8f2ac17472464e92a353e2de17401c73 is 50, key is test_row_0/A:col10/1732332055933/Put/seqid=0 2024-11-23T03:20:56,821 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:56,821 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/6e5cc338d779432b893772ad1ea381f4 is 50, key is test_row_0/A:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:56,830 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:56,831 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/eaca0463d8cf459caded2d80eab0954e is 50, key is test_row_0/B:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:56,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741900_1076 (size=12983) 2024-11-23T03:20:56,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741899_1075 (size=12301) 2024-11-23T03:20:56,843 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8f2ac17472464e92a353e2de17401c73 2024-11-23T03:20:56,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741901_1077 (size=12983) 2024-11-23T03:20:56,855 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/6e5cc338d779432b893772ad1ea381f4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/6e5cc338d779432b893772ad1ea381f4 2024-11-23T03:20:56,858 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/eaca0463d8cf459caded2d80eab0954e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/eaca0463d8cf459caded2d80eab0954e 2024-11-23T03:20:56,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/86b4aadcea924f00a0fa3bfd63768cf1 is 50, key is test_row_0/B:col10/1732332055933/Put/seqid=0 2024-11-23T03:20:56,865 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into 6e5cc338d779432b893772ad1ea381f4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:56,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:56,866 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332056800; duration=0sec 2024-11-23T03:20:56,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:56,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:56,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:56,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:56,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:56,867 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into eaca0463d8cf459caded2d80eab0954e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:56,867 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:56,868 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332056800; duration=0sec 2024-11-23T03:20:56,868 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:56,868 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:56,868 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:56,868 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:56,868 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:56,869 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/097670f156f749f3884f40f70d516c71, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.5 K 2024-11-23T03:20:56,869 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 097670f156f749f3884f40f70d516c71, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732332052320 2024-11-23T03:20:56,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15b44c0879314c8d838c38890f8abc0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732332052660 2024-11-23T03:20:56,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c303347dbe11461f8d6932e53d616e5d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:56,882 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:56,882 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/79cc6568b5164e9784090b50598682a8 is 50, key is test_row_0/C:col10/1732332053795/Put/seqid=0 2024-11-23T03:20:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741902_1078 (size=12301) 2024-11-23T03:20:56,884 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/86b4aadcea924f00a0fa3bfd63768cf1 2024-11-23T03:20:56,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741903_1079 (size=12983) 2024-11-23T03:20:56,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5dc6deb5cccc4395baba0eae8cc277d8 is 50, key is test_row_0/C:col10/1732332055933/Put/seqid=0 2024-11-23T03:20:56,902 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/79cc6568b5164e9784090b50598682a8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/79cc6568b5164e9784090b50598682a8 2024-11-23T03:20:56,915 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into 79cc6568b5164e9784090b50598682a8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:56,915 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:56,915 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332056801; duration=0sec 2024-11-23T03:20:56,916 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:56,916 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:56,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741904_1080 (size=12301) 2024-11-23T03:20:56,920 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5dc6deb5cccc4395baba0eae8cc277d8 2024-11-23T03:20:56,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:56,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332116918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:56,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8f2ac17472464e92a353e2de17401c73 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73 2024-11-23T03:20:56,933 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73, entries=150, sequenceid=315, filesize=12.0 K 2024-11-23T03:20:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/86b4aadcea924f00a0fa3bfd63768cf1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1 2024-11-23T03:20:56,939 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1, entries=150, sequenceid=315, filesize=12.0 K 2024-11-23T03:20:56,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5dc6deb5cccc4395baba0eae8cc277d8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8 2024-11-23T03:20:56,949 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8, entries=150, sequenceid=315, filesize=12.0 K 2024-11-23T03:20:56,951 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 145ms, sequenceid=315, compaction requested=false 2024-11-23T03:20:56,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:56,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:56,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-23T03:20:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-23T03:20:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T03:20:56,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-23T03:20:56,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 300 msec 2024-11-23T03:20:56,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 307 msec 2024-11-23T03:20:57,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:57,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:57,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/e84ee9882d084c52b7168805ef015f7a is 50, key is test_row_0/A:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741905_1081 (size=14741) 2024-11-23T03:20:57,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/e84ee9882d084c52b7168805ef015f7a 2024-11-23T03:20:57,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/7ecf281291e6409f924b9cbb247ae9b8 is 50, key is test_row_0/B:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332117073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332117075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332117078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332117079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332117082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741906_1082 (size=12301) 2024-11-23T03:20:57,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/7ecf281291e6409f924b9cbb247ae9b8 2024-11-23T03:20:57,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/bf91e5e501d0403c8406da1a7b5f9371 is 50, key is test_row_0/C:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741907_1083 (size=12301) 2024-11-23T03:20:57,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/bf91e5e501d0403c8406da1a7b5f9371 2024-11-23T03:20:57,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/e84ee9882d084c52b7168805ef015f7a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a 2024-11-23T03:20:57,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a, entries=200, sequenceid=335, filesize=14.4 K 2024-11-23T03:20:57,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/7ecf281291e6409f924b9cbb247ae9b8 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8 2024-11-23T03:20:57,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8, entries=150, sequenceid=335, filesize=12.0 K 2024-11-23T03:20:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/bf91e5e501d0403c8406da1a7b5f9371 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371 2024-11-23T03:20:57,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371, entries=150, sequenceid=335, filesize=12.0 K 2024-11-23T03:20:57,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 139ms, sequenceid=335, compaction requested=true 2024-11-23T03:20:57,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:57,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,163 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:57,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:57,163 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:57,164 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,165 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:57,165 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,165 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/6e5cc338d779432b893772ad1ea381f4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=39.1 K 2024-11-23T03:20:57,165 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,165 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e5cc338d779432b893772ad1ea381f4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:57,166 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:57,166 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
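The "1 in ratio" phrase in the ExploringCompactionPolicy line above refers to the policy's size-ratio test: a candidate set of store files is acceptable when no single file is larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files in the set. The following is a simplified, self-contained sketch of that check, not the actual HBase implementation; the sizes are rough approximations of the three A-store files selected above.

import java.util.List;

/**
 * Simplified illustration of the "in ratio" test referenced by the
 * ExploringCompactionPolicy log line: a selection passes when no file
 * exceeds ratio * (sum of the other files). Sketch only, not HBase code.
 */
public class RatioCheckSketch {

    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            long sumOfOthers = total - size;
            if (size > ratio * sumOfOthers) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three selected A-store files (~40025 bytes total).
        List<Long> sizes = List.of(13005L, 12301L, 14719L);
        System.out.println(filesInRatio(sizes, 1.2)); // true -> selection is "in ratio"
    }
}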
2024-11-23T03:20:57,166 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/eaca0463d8cf459caded2d80eab0954e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.7 K 2024-11-23T03:20:57,166 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f2ac17472464e92a353e2de17401c73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332055933 2024-11-23T03:20:57,166 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting eaca0463d8cf459caded2d80eab0954e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:57,167 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e84ee9882d084c52b7168805ef015f7a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056907 2024-11-23T03:20:57,167 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b4aadcea924f00a0fa3bfd63768cf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332055933 2024-11-23T03:20:57,168 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ecf281291e6409f924b9cbb247ae9b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056911 2024-11-23T03:20:57,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:57,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:57,180 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,181 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/cc4d78d63ac54126accda00e3e467259 is 50, key is test_row_0/A:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,190 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,191 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/6f7fc01ae438444f999cb2f7111fde21 is 50, key is test_row_0/B:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/751c8f4542bf47b1831ad663246af6ce is 50, key is test_row_0/A:col10/1732332057071/Put/seqid=0 2024-11-23T03:20:57,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741908_1084 (size=13085) 2024-11-23T03:20:57,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741909_1085 (size=13085) 2024-11-23T03:20:57,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741910_1086 (size=14741) 2024-11-23T03:20:57,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/751c8f4542bf47b1831ad663246af6ce 2024-11-23T03:20:57,227 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/cc4d78d63ac54126accda00e3e467259 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/cc4d78d63ac54126accda00e3e467259 2024-11-23T03:20:57,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/963dfaf2c083450288f0e529c2cb30b8 is 50, key is test_row_0/B:col10/1732332057071/Put/seqid=0 2024-11-23T03:20:57,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332117231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,236 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into cc4d78d63ac54126accda00e3e467259(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
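The recurring RegionTooBusyException ("Over memstore limit=512.0 K") is thrown back to the caller from checkResources() whenever a put would push the memstore past its blocking size, and the caller is expected to back off until the flush shown above frees space. The stock HBase client retries this internally (and may surface the exception wrapped in a retries-exhausted error), but a hand-rolled loop would look roughly like the sketch below; the table name matches the log, while the backoff values and attempt count are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative backoff-and-retry loop for RegionTooBusyException. */
public class BusyRegionRetrySketch {

    static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            long backoffMs = 100; // illustrative starting backoff
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is above its blocking memstore size; wait for a
                    // flush to free space, then try again with a longer pause.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IOException("region still too busy after retries");
        }
    }

    public static void main(String[] args) {
        // Row/column names mirror the test data in the log; obtaining a
        // Connection (ConnectionFactory.createConnection(conf)) is omitted here.
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        System.out.println(put);
    }
}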
2024-11-23T03:20:57,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,236 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332057162; duration=0sec 2024-11-23T03:20:57,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:57,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:57,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,239 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,239 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:57,239 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,239 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/79cc6568b5164e9784090b50598682a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.7 K 2024-11-23T03:20:57,240 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79cc6568b5164e9784090b50598682a8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732332053788 2024-11-23T03:20:57,242 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dc6deb5cccc4395baba0eae8cc277d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332055933 2024-11-23T03:20:57,242 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf91e5e501d0403c8406da1a7b5f9371, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056911 2024-11-23T03:20:57,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38243 is added to blk_1073741911_1087 (size=12301) 2024-11-23T03:20:57,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/963dfaf2c083450288f0e529c2cb30b8 2024-11-23T03:20:57,254 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#73 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T03:20:57,255 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/9accee464d06490ea7043d7d414a5d9c is 50, key is test_row_0/C:col10/1732332057022/Put/seqid=0 2024-11-23T03:20:57,255 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-23T03:20:57,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:57,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-23T03:20:57,259 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:57,259 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:57,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:57,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:57,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741912_1088 (size=13085) 2024-11-23T03:20:57,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/7729b9c5c39c40c5850a77520ca44403 is 50, key is test_row_0/C:col10/1732332057071/Put/seqid=0 2024-11-23T03:20:57,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741913_1089 (size=12301) 2024-11-23T03:20:57,280 INFO 
[MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/7729b9c5c39c40c5850a77520ca44403 2024-11-23T03:20:57,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/751c8f4542bf47b1831ad663246af6ce as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce 2024-11-23T03:20:57,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce, entries=200, sequenceid=355, filesize=14.4 K 2024-11-23T03:20:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/963dfaf2c083450288f0e529c2cb30b8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8 2024-11-23T03:20:57,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8, entries=150, sequenceid=355, filesize=12.0 K 2024-11-23T03:20:57,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/7729b9c5c39c40c5850a77520ca44403 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403 2024-11-23T03:20:57,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403, entries=150, sequenceid=355, filesize=12.0 K 2024-11-23T03:20:57,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 140ms, sequenceid=355, compaction requested=false 2024-11-23T03:20:57,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:57,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:20:57,340 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:57,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:57,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:57,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d8c1efcab5e74324944c844a2edac3fa is 50, key is test_row_0/A:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741914_1090 (size=14741) 2024-11-23T03:20:57,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d8c1efcab5e74324944c844a2edac3fa 2024-11-23T03:20:57,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:57,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/6e22bacff76c4658aeeba2f251dcc191 is 50, key is test_row_0/B:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741915_1091 (size=12301) 2024-11-23T03:20:57,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/6e22bacff76c4658aeeba2f251dcc191 2024-11-23T03:20:57,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332117396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/2514ec632016400cbe11e74a265fadd8 is 50, key is test_row_0/C:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T03:20:57,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741916_1092 (size=12301) 2024-11-23T03:20:57,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:57,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332117499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:57,569 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T03:20:57,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:57,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
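The 512.0 K figure in these RegionTooBusyException entries is the region's blocking memstore size, i.e. the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently runs with a much smaller flush size than the 128 MB default. The sketch below shows one plausible combination of settings that yields a 512 K limit; the values actually used by this test run are not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch of how a 512 K blocking limit could arise: updates are blocked
 * once the memstore exceeds flush.size * block.multiplier. Assumed values.
 */
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 x flush size (default)

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
    }
}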
2024-11-23T03:20:57,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,618 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/6f7fc01ae438444f999cb2f7111fde21 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6f7fc01ae438444f999cb2f7111fde21 2024-11-23T03:20:57,625 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 6f7fc01ae438444f999cb2f7111fde21(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:57,625 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,626 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332057163; duration=0sec 2024-11-23T03:20:57,626 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,626 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:57,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/9accee464d06490ea7043d7d414a5d9c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/9accee464d06490ea7043d7d414a5d9c 2024-11-23T03:20:57,676 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into 9accee464d06490ea7043d7d414a5d9c(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:57,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,676 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332057163; duration=0sec 2024-11-23T03:20:57,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:57,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332117701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T03:20:57,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:57,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:57,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:57,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:57,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/2514ec632016400cbe11e74a265fadd8 2024-11-23T03:20:57,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d8c1efcab5e74324944c844a2edac3fa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa 2024-11-23T03:20:57,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa, entries=200, sequenceid=373, filesize=14.4 K 2024-11-23T03:20:57,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/6e22bacff76c4658aeeba2f251dcc191 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191 2024-11-23T03:20:57,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191, entries=150, sequenceid=373, filesize=12.0 K 2024-11-23T03:20:57,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/2514ec632016400cbe11e74a265fadd8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8 2024-11-23T03:20:57,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8, entries=150, sequenceid=373, filesize=12.0 K 2024-11-23T03:20:57,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 507ms, sequenceid=373, compaction requested=true 2024-11-23T03:20:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,847 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,847 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:57,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:57,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:57,849 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42567 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,849 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,849 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:57,849 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:57,849 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,849 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:57,849 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/cc4d78d63ac54126accda00e3e467259, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=41.6 K 2024-11-23T03:20:57,849 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6f7fc01ae438444f999cb2f7111fde21, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.8 K 2024-11-23T03:20:57,849 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc4d78d63ac54126accda00e3e467259, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056911 2024-11-23T03:20:57,850 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f7fc01ae438444f999cb2f7111fde21, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056911 2024-11-23T03:20:57,850 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 751c8f4542bf47b1831ad663246af6ce, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732332057057 2024-11-23T03:20:57,850 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 963dfaf2c083450288f0e529c2cb30b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732332057065 2024-11-23T03:20:57,851 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8c1efcab5e74324944c844a2edac3fa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:57,851 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e22bacff76c4658aeeba2f251dcc191, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:57,862 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,863 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,863 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8246a7eea5d54bb58da25d95e1e56ebb is 50, key is test_row_0/A:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:57,864 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0d6966d5ba944efaa08889d6e5e82b40 is 50, key is test_row_0/B:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741917_1093 (size=13187) 2024-11-23T03:20:57,876 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:57,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T03:20:57,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:57,877 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:57,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:57,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741918_1094 (size=13187) 2024-11-23T03:20:57,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/942d4713e166483790d4a659d7303c15 is 50, key is test_row_0/A:col10/1732332057371/Put/seqid=0 2024-11-23T03:20:57,886 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0d6966d5ba944efaa08889d6e5e82b40 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0d6966d5ba944efaa08889d6e5e82b40 2024-11-23T03:20:57,890 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8246a7eea5d54bb58da25d95e1e56ebb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8246a7eea5d54bb58da25d95e1e56ebb 2024-11-23T03:20:57,892 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 0d6966d5ba944efaa08889d6e5e82b40(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:57,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,893 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332057847; duration=0sec 2024-11-23T03:20:57,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:57,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:57,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:57,894 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:57,894 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:57,894 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:57,894 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/9accee464d06490ea7043d7d414a5d9c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.8 K 2024-11-23T03:20:57,895 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9accee464d06490ea7043d7d414a5d9c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732332056911 2024-11-23T03:20:57,895 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7729b9c5c39c40c5850a77520ca44403, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732332057065 2024-11-23T03:20:57,896 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2514ec632016400cbe11e74a265fadd8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:57,897 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into 8246a7eea5d54bb58da25d95e1e56ebb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:57,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,897 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332057847; duration=0sec 2024-11-23T03:20:57,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:57,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741919_1095 (size=12301) 2024-11-23T03:20:57,907 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:57,908 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/ac8e91edc9524a4faaa58f792a9a2194 is 50, key is test_row_0/C:col10/1732332057216/Put/seqid=0 2024-11-23T03:20:57,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741920_1096 (size=13187) 2024-11-23T03:20:57,927 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/ac8e91edc9524a4faaa58f792a9a2194 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ac8e91edc9524a4faaa58f792a9a2194 2024-11-23T03:20:57,935 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into ac8e91edc9524a4faaa58f792a9a2194(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:57,936 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:57,936 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332057848; duration=0sec 2024-11-23T03:20:57,936 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:57,936 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:20:58,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:58,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332118048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332118081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332118082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332118082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332118090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332118151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,301 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/942d4713e166483790d4a659d7303c15 2024-11-23T03:20:58,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/e763ea0e526347d3867d49f2e9496863 is 50, key is test_row_0/B:col10/1732332057371/Put/seqid=0 2024-11-23T03:20:58,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741921_1097 (size=12301) 2024-11-23T03:20:58,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332118354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:58,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332118656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:58,720 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/e763ea0e526347d3867d49f2e9496863 2024-11-23T03:20:58,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5fa96ffb5f784b6cbf29c312ed065d16 is 50, key is test_row_0/C:col10/1732332057371/Put/seqid=0 2024-11-23T03:20:58,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741922_1098 (size=12301) 2024-11-23T03:20:58,740 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5fa96ffb5f784b6cbf29c312ed065d16 2024-11-23T03:20:58,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/942d4713e166483790d4a659d7303c15 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15 2024-11-23T03:20:58,757 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15, entries=150, sequenceid=394, filesize=12.0 K 2024-11-23T03:20:58,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/e763ea0e526347d3867d49f2e9496863 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863 2024-11-23T03:20:58,764 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863, entries=150, sequenceid=394, filesize=12.0 K 2024-11-23T03:20:58,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/5fa96ffb5f784b6cbf29c312ed065d16 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16 2024-11-23T03:20:58,773 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16, entries=150, sequenceid=394, filesize=12.0 K 2024-11-23T03:20:58,775 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 899ms, sequenceid=394, compaction requested=false 2024-11-23T03:20:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-23T03:20:58,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-23T03:20:58,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T03:20:58,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5170 sec 2024-11-23T03:20:58,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.5240 sec 2024-11-23T03:20:59,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:59,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:59,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/506bb041ca1848de94cbbf88cb4d3255 is 50, key is test_row_0/A:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741923_1099 (size=12301) 2024-11-23T03:20:59,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332119217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:59,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332119320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T03:20:59,365 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-23T03:20:59,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:20:59,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-23T03:20:59,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:20:59,369 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:20:59,370 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:20:59,370 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:20:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:20:59,522 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T03:20:59,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:59,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:59,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:59,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332119525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/506bb041ca1848de94cbbf88cb4d3255 2024-11-23T03:20:59,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/d9170ac4137441bea2c022adf50e08ad is 50, key is test_row_0/B:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741924_1100 (size=12301) 2024-11-23T03:20:59,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/d9170ac4137441bea2c022adf50e08ad 2024-11-23T03:20:59,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/470c36c1d0464d3f8fa02a466b2d421b is 50, key is test_row_0/C:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741925_1101 (size=12301) 2024-11-23T03:20:59,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/470c36c1d0464d3f8fa02a466b2d421b 2024-11-23T03:20:59,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:20:59,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/506bb041ca1848de94cbbf88cb4d3255 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255 2024-11-23T03:20:59,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T03:20:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:59,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:20:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:20:59,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255, entries=150, sequenceid=416, filesize=12.0 K 2024-11-23T03:20:59,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/d9170ac4137441bea2c022adf50e08ad as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad 2024-11-23T03:20:59,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad, entries=150, sequenceid=416, filesize=12.0 K 2024-11-23T03:20:59,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/470c36c1d0464d3f8fa02a466b2d421b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b 2024-11-23T03:20:59,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b, entries=150, sequenceid=416, filesize=12.0 K 2024-11-23T03:20:59,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 529ms, sequenceid=416, compaction requested=true 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:20:59,698 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:59,698 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:20:59,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:59,699 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:59,699 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:20:59,699 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:59,700 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8246a7eea5d54bb58da25d95e1e56ebb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.9 K 2024-11-23T03:20:59,700 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:59,700 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8246a7eea5d54bb58da25d95e1e56ebb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:59,700 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:20:59,700 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:59,700 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0d6966d5ba944efaa08889d6e5e82b40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.9 K 2024-11-23T03:20:59,701 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 942d4713e166483790d4a659d7303c15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732332057371 2024-11-23T03:20:59,701 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d6966d5ba944efaa08889d6e5e82b40, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:59,701 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 506bb041ca1848de94cbbf88cb4d3255, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:20:59,701 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e763ea0e526347d3867d49f2e9496863, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732332057371 2024-11-23T03:20:59,702 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d9170ac4137441bea2c022adf50e08ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:20:59,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:59,715 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/f27ea7e6cbc24833944e08e2e79cac78 is 50, key is test_row_0/B:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,722 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#88 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:59,723 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/b0c81a9e7a0b4217bd09c8b514b8fec7 is 50, key is test_row_0/A:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741926_1102 (size=13289) 2024-11-23T03:20:59,735 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/f27ea7e6cbc24833944e08e2e79cac78 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f27ea7e6cbc24833944e08e2e79cac78 2024-11-23T03:20:59,744 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into f27ea7e6cbc24833944e08e2e79cac78(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:20:59,744 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:59,744 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332059698; duration=0sec 2024-11-23T03:20:59,744 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:20:59,744 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:20:59,744 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:20:59,753 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:20:59,753 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:20:59,753 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:20:59,753 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ac8e91edc9524a4faaa58f792a9a2194, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=36.9 K 2024-11-23T03:20:59,754 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ac8e91edc9524a4faaa58f792a9a2194, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732332057200 2024-11-23T03:20:59,754 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fa96ffb5f784b6cbf29c312ed065d16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732332057371 2024-11-23T03:20:59,755 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 470c36c1d0464d3f8fa02a466b2d421b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:20:59,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741927_1103 (size=13289) 2024-11-23T03:20:59,762 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/b0c81a9e7a0b4217bd09c8b514b8fec7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b0c81a9e7a0b4217bd09c8b514b8fec7 2024-11-23T03:20:59,767 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:20:59,768 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f is 50, key is test_row_0/C:col10/1732332059164/Put/seqid=0 2024-11-23T03:20:59,769 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into b0c81a9e7a0b4217bd09c8b514b8fec7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:20:59,769 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:20:59,769 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332059698; duration=0sec 2024-11-23T03:20:59,769 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:20:59,769 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:20:59,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741928_1104 (size=13289) 2024-11-23T03:20:59,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T03:20:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:20:59,830 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:20:59,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:20:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:20:59,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:20:59,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/efae3a7fa84f46da8d2712c9916c6601 is 50, key is test_row_0/A:col10/1732332059216/Put/seqid=0 2024-11-23T03:20:59,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741929_1105 (size=12301) 2024-11-23T03:20:59,871 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/efae3a7fa84f46da8d2712c9916c6601 2024-11-23T03:20:59,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332119873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:20:59,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/042638efae104c298ffe512bde394526 is 50, key is test_row_0/B:col10/1732332059216/Put/seqid=0 2024-11-23T03:20:59,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741930_1106 (size=12301) 2024-11-23T03:20:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:20:59,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:20:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332119976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332120086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,089 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:00,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332120093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,095 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:00,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332120096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,097 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:00,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332120098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,099 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:00,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332120180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,187 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f 2024-11-23T03:21:00,195 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into dcd67e4b4f1a4a5a9a8d1409a0c9036f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
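Illustrative sketch (not part of the captured log): the RpcRetryingCallerImpl entries above (tries=6, retries=16 against RegionTooBusyException: Over memstore limit=512.0 K) show the client absorbing back-pressure while the region flushes. The snippet below reproduces that client-side setup with standard HBase 2.x APIs; the retry count matches the log, while the pause value and the cell value written are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTunedWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // 16 attempts matches the "retries=16" reported by RpcRetryingCallerImpl in the log.
    conf.setInt("hbase.client.retries.number", 16);
    // Base backoff between attempts in ms; this value is an assumption, not from the log.
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the flushed cells in the log (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // put() is retried internally; RegionTooBusyException only surfaces to the caller
      // once the retry budget or the operation deadline is exhausted.
      table.put(put);
    }
  }
}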
2024-11-23T03:21:00,195 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:00,195 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332059698; duration=0sec 2024-11-23T03:21:00,196 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:00,196 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:21:00,290 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/042638efae104c298ffe512bde394526 2024-11-23T03:21:00,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/962425e0bc054e47b8b8f3e1411f30d2 is 50, key is test_row_0/C:col10/1732332059216/Put/seqid=0 2024-11-23T03:21:00,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741931_1107 (size=12301) 2024-11-23T03:21:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:21:00,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332120484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:00,707 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/962425e0bc054e47b8b8f3e1411f30d2 2024-11-23T03:21:00,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/efae3a7fa84f46da8d2712c9916c6601 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601 2024-11-23T03:21:00,719 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601, entries=150, sequenceid=435, filesize=12.0 K 2024-11-23T03:21:00,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/042638efae104c298ffe512bde394526 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526 2024-11-23T03:21:00,725 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526, entries=150, sequenceid=435, filesize=12.0 K 2024-11-23T03:21:00,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/962425e0bc054e47b8b8f3e1411f30d2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2 2024-11-23T03:21:00,731 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2, entries=150, sequenceid=435, filesize=12.0 K 2024-11-23T03:21:00,733 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 902ms, sequenceid=435, compaction requested=false 2024-11-23T03:21:00,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:00,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:00,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-23T03:21:00,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-23T03:21:00,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-23T03:21:00,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3640 sec 2024-11-23T03:21:00,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.3700 sec 2024-11-23T03:21:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:00,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:21:00,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:00,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/62b4186e68dc4ea6b35f945637d932ba is 50, key is test_row_0/A:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:01,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741932_1108 (size=14741) 2024-11-23T03:21:01,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332121020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:01,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332121122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332121325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/62b4186e68dc4ea6b35f945637d932ba 2024-11-23T03:21:01,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/9da6002b707e4031b1406b902fe366d6 is 50, key is test_row_0/B:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:01,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741933_1109 (size=12301) 2024-11-23T03:21:01,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/9da6002b707e4031b1406b902fe366d6 2024-11-23T03:21:01,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/e95285a3d11b4c948621f59f3d0b2b63 is 50, key is test_row_0/C:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:01,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741934_1110 (size=12301) 2024-11-23T03:21:01,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T03:21:01,473 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-23T03:21:01,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-23T03:21:01,476 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:01,476 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:01,477 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:01,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:01,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:01,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:01,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332121628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T03:21:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:01,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
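Illustrative sketch (assumed, not taken from the test source): the FLUSH operations recorded here (FlushTableProcedure pid=24/26 with a FlushRegionProcedure subprocedure per region) correspond to an Admin-level flush request; when the region is already flushing, the region-side callable fails with "Unable to complete flush ... as already flushing" and the master re-dispatches the procedure, as seen above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into FlushTableProcedure -> FlushRegionProcedure, the same
      // procedure chain visible in the log; it is retried server-side if the region is
      // still busy with an earlier flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}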
2024-11-23T03:21:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:01,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:01,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:01,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T03:21:01,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:01,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:01,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:01,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:01,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:01,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:01,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/e95285a3d11b4c948621f59f3d0b2b63 2024-11-23T03:21:01,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/62b4186e68dc4ea6b35f945637d932ba as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba 2024-11-23T03:21:01,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba, entries=200, sequenceid=456, filesize=14.4 K 2024-11-23T03:21:01,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/9da6002b707e4031b1406b902fe366d6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6 2024-11-23T03:21:01,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6, entries=150, sequenceid=456, filesize=12.0 K 2024-11-23T03:21:01,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/e95285a3d11b4c948621f59f3d0b2b63 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63 2024-11-23T03:21:01,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63, entries=150, sequenceid=456, filesize=12.0 K 2024-11-23T03:21:01,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 885ms, sequenceid=456, compaction requested=true 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:01,876 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:01,876 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:01,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:01,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:01,878 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:01,878 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:21:01,878 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:01,878 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:01,878 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f27ea7e6cbc24833944e08e2e79cac78, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.0 K 2024-11-23T03:21:01,878 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:21:01,878 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:01,878 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b0c81a9e7a0b4217bd09c8b514b8fec7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=39.4 K 2024-11-23T03:21:01,878 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f27ea7e6cbc24833944e08e2e79cac78, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:21:01,879 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0c81a9e7a0b4217bd09c8b514b8fec7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:21:01,879 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 042638efae104c298ffe512bde394526, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332059198 2024-11-23T03:21:01,879 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting efae3a7fa84f46da8d2712c9916c6601, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332059198 2024-11-23T03:21:01,880 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9da6002b707e4031b1406b902fe366d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:01,880 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62b4186e68dc4ea6b35f945637d932ba, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:01,889 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#96 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:01,890 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#97 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:01,890 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/64f5ca812c914252864f2ffdd758b4a9 is 50, key is test_row_0/A:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:01,891 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/8abd6ed365464902b3be8ddbf762f7b9 is 50, key is test_row_0/B:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741936_1112 (size=13391) 2024-11-23T03:21:01,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741935_1111 (size=13391) 2024-11-23T03:21:01,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:01,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T03:21:01,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:01,936 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:21:01,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:01,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/66bee1a81083408498ea40a9d71e7db4 is 50, key is test_row_0/A:col10/1732332061018/Put/seqid=0 2024-11-23T03:21:01,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741937_1113 (size=12301) 2024-11-23T03:21:02,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:02,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:02,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:02,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332122183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:02,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:02,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332122285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:02,306 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/8abd6ed365464902b3be8ddbf762f7b9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8abd6ed365464902b3be8ddbf762f7b9 2024-11-23T03:21:02,314 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/64f5ca812c914252864f2ffdd758b4a9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/64f5ca812c914252864f2ffdd758b4a9 2024-11-23T03:21:02,316 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 8abd6ed365464902b3be8ddbf762f7b9(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:02,316 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:02,317 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332061876; duration=0sec 2024-11-23T03:21:02,318 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:02,318 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:21:02,318 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:02,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:02,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:21:02,320 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:02,320 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.0 K 2024-11-23T03:21:02,321 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting dcd67e4b4f1a4a5a9a8d1409a0c9036f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332058046 2024-11-23T03:21:02,322 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 962425e0bc054e47b8b8f3e1411f30d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332059198 2024-11-23T03:21:02,322 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into 64f5ca812c914252864f2ffdd758b4a9(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:02,322 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:02,322 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332061876; duration=0sec 2024-11-23T03:21:02,322 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:02,322 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:21:02,322 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e95285a3d11b4c948621f59f3d0b2b63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:02,331 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:02,332 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/e12f9ab865384feab0754c6603cecb3e is 50, key is test_row_0/C:col10/1732332059863/Put/seqid=0 2024-11-23T03:21:02,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741938_1114 (size=13391) 2024-11-23T03:21:02,347 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/66bee1a81083408498ea40a9d71e7db4 2024-11-23T03:21:02,354 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/e12f9ab865384feab0754c6603cecb3e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e12f9ab865384feab0754c6603cecb3e 2024-11-23T03:21:02,361 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into e12f9ab865384feab0754c6603cecb3e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:02,361 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:02,361 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332061876; duration=0sec 2024-11-23T03:21:02,361 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:02,361 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:21:02,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c61e5eb638145978f303ca9ac3d0d2e is 50, key is test_row_0/B:col10/1732332061018/Put/seqid=0 2024-11-23T03:21:02,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741939_1115 (size=12301) 2024-11-23T03:21:02,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:02,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332122489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:02,779 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c61e5eb638145978f303ca9ac3d0d2e 2024-11-23T03:21:02,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/87fc89f0aa5f46c68a31378abc8670e8 is 50, key is test_row_0/C:col10/1732332061018/Put/seqid=0 2024-11-23T03:21:02,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332122792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:02,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741940_1116 (size=12301) 2024-11-23T03:21:02,796 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/87fc89f0aa5f46c68a31378abc8670e8 2024-11-23T03:21:02,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/66bee1a81083408498ea40a9d71e7db4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4 2024-11-23T03:21:02,806 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:21:02,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c61e5eb638145978f303ca9ac3d0d2e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e 2024-11-23T03:21:02,813 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:21:02,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/87fc89f0aa5f46c68a31378abc8670e8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8 2024-11-23T03:21:02,819 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:21:02,820 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 884ms, sequenceid=472, compaction requested=false 2024-11-23T03:21:02,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:02,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:02,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-23T03:21:02,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-23T03:21:02,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-23T03:21:02,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3450 sec 2024-11-23T03:21:02,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.3510 sec 2024-11-23T03:21:03,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:03,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T03:21:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:21:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:21:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:03,298 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:21:03,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:03,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/688246a2dc004be49396efa9c1ebaa8e is 50, key is test_row_0/A:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:03,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741941_1117 (size=14741) 2024-11-23T03:21:03,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:03,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332123332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:03,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:03,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332123436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:03,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T03:21:03,581 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-23T03:21:03,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-23T03:21:03,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:03,584 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:03,585 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:03,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332123639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:03,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:03,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/688246a2dc004be49396efa9c1ebaa8e 2024-11-23T03:21:03,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c6dd402cbfd46e6b4fda359fc71b761 is 50, key is test_row_0/B:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:03,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741942_1118 (size=12301) 2024-11-23T03:21:03,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:03,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:03,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:03,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:03,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:03,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:03,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332123944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:04,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:04,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
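The RegionTooBusyException entries above are the region server refusing new Mutate calls once the region's memstore reaches its blocking limit (here 512.0 K) while the flush is still pending. A minimal, hedged sketch of how that blocking threshold is conventionally derived from the standard HBase settings follows; the class name and the 128 KB flush size used for illustration are assumptions, not values read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch: derive the memstore blocking threshold behind
 * "RegionTooBusyException: Over memstore limit=..." from configuration.
 */
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is requested (default 128 MB).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked once the memstore reaches flushSize * multiplier (default 4).
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
        // With an assumed test-scale flush size of 128 KB and the default
        // multiplier of 4, this works out to 512 KB, matching the limit logged above.
    }
}

Raising either setting, or letting the pending flush complete, is what clears the condition; on the client side the operation is simply retried, as the RpcRetryingCallerImpl entries that follow show.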
2024-11-23T03:21:04,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:04,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43900 deadline: 1732332124106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:04,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43880 deadline: 1732332124106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,107 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8173 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:04,107 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:04,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:04,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43854 deadline: 1732332124125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,126 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:04,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c6dd402cbfd46e6b4fda359fc71b761 2024-11-23T03:21:04,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:04,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43886 deadline: 1732332124133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,135 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:04,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/414287f67d4445e589d0978ceb3d4120 is 50, key is test_row_0/C:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741943_1119 (size=12301) 2024-11-23T03:21:04,176 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:61411 2024-11-23T03:21:04,176 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:61411 2024-11-23T03:21:04,176 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:04,176 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:04,176 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:61411 2024-11-23T03:21:04,176 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:04,178 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:61411 2024-11-23T03:21:04,178 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:04,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:04,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43850 deadline: 1732332124446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,503 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:04,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:04,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
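The RegionTooBusyException in the entries above is the region server applying back-pressure: once the region's memstore grows past its blocking limit (the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; this test runs with a small 512 K limit, as logged), new mutations are rejected until the in-flight flush drains it, and writers are expected to back off and retry. A minimal client-side sketch of that pattern, assuming the standard HBase client API and an already-open Connection (table, family, row and value names are illustrative):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static void putWithBackoff(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);          // fails while the memstore is over its blocking limit
                    return;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // back off and give the flush time to catch up
                    backoffMs *= 2;
                }
            }
            throw new RuntimeException("region still too busy after retries");
        }
    }

In practice the HBase client normally retries this exception internally with its own backoff; the sketch only makes that behaviour explicit.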
2024-11-23T03:21:04,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:04,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/414287f67d4445e589d0978ceb3d4120 2024-11-23T03:21:04,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/688246a2dc004be49396efa9c1ebaa8e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e 2024-11-23T03:21:04,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e, entries=200, sequenceid=496, filesize=14.4 K 2024-11-23T03:21:04,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/0c6dd402cbfd46e6b4fda359fc71b761 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761 2024-11-23T03:21:04,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761, entries=150, sequenceid=496, filesize=12.0 K 2024-11-23T03:21:04,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/414287f67d4445e589d0978ceb3d4120 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120 2024-11-23T03:21:04,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120, entries=150, sequenceid=496, filesize=12.0 K 2024-11-23T03:21:04,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 1264ms, sequenceid=496, compaction requested=true 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
4dfbb59dd7b53f05c99615119ca9b6f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:04,562 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:04,562 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:04,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4dfbb59dd7b53f05c99615119ca9b6f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:04,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/B is initiating minor compaction (all files) 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/A is initiating minor compaction (all files) 2024-11-23T03:21:04,564 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/B in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:04,564 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/A in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:04,564 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8abd6ed365464902b3be8ddbf762f7b9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.1 K 2024-11-23T03:21:04,564 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/64f5ca812c914252864f2ffdd758b4a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=39.5 K 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64f5ca812c914252864f2ffdd758b4a9, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:04,564 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8abd6ed365464902b3be8ddbf762f7b9, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:04,565 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66bee1a81083408498ea40a9d71e7db4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332061006 2024-11-23T03:21:04,565 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c61e5eb638145978f303ca9ac3d0d2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332061006 2024-11-23T03:21:04,565 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c6dd402cbfd46e6b4fda359fc71b761, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732332062167 2024-11-23T03:21:04,565 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 688246a2dc004be49396efa9c1ebaa8e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732332062167 2024-11-23T03:21:04,573 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#B#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:04,573 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#A#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:04,573 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d6a4d98a98204d13bbef15113ada43ac is 50, key is test_row_0/A:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:04,573 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/b4a35ecd85bc4ea484a64186b2bb8968 is 50, key is test_row_0/B:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:04,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741945_1121 (size=13493) 2024-11-23T03:21:04,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741944_1120 (size=13493) 2024-11-23T03:21:04,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:04,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T03:21:04,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
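The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from HBase's exploring compaction policy: a candidate set of store files is only accepted when no single file is disproportionately larger than the rest, i.e. each file's size must stay within the compaction ratio times the combined size of the other files (hbase.hstore.compaction.ratio, 1.2 by default). A simplified sketch of that ratio test, not the actual HBase implementation, with sizes approximated from the B-family selection logged above:

    public final class CompactionRatioCheck {
        // True when every file is at most `ratio` times the combined size of the others --
        // the condition the exploring policy reports as the candidate being "in ratio".
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the B-family candidate above: a 13.1 K file plus two 12.0 K files (~37.1 K total).
            long[] sizes = {13414, 12290, 12289};
            System.out.println(filesInRatio(sizes, 1.2)); // true, so all three files compact together
        }
    }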
2024-11-23T03:21:04,657 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:21:04,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:21:04,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:04,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:21:04,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:04,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:21:04,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:04,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8de610127b2f45e78209148171114ea7 is 50, key is test_row_0/A:col10/1732332063328/Put/seqid=0 2024-11-23T03:21:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741946_1122 (size=12301) 2024-11-23T03:21:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:04,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/b4a35ecd85bc4ea484a64186b2bb8968 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b4a35ecd85bc4ea484a64186b2bb8968 2024-11-23T03:21:04,984 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/d6a4d98a98204d13bbef15113ada43ac as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d6a4d98a98204d13bbef15113ada43ac 2024-11-23T03:21:04,989 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/B of 4dfbb59dd7b53f05c99615119ca9b6f4 into 
b4a35ecd85bc4ea484a64186b2bb8968(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:04,989 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/A of 4dfbb59dd7b53f05c99615119ca9b6f4 into d6a4d98a98204d13bbef15113ada43ac(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:04,989 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/B, priority=13, startTime=1732332064562; duration=0sec 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:04,989 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/A, priority=13, startTime=1732332064562; duration=0sec 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:B 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:04,989 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:A 2024-11-23T03:21:04,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:04,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 4dfbb59dd7b53f05c99615119ca9b6f4/C is initiating minor compaction (all files) 2024-11-23T03:21:04,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4dfbb59dd7b53f05c99615119ca9b6f4/C in TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:04,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e12f9ab865384feab0754c6603cecb3e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp, totalSize=37.1 K 2024-11-23T03:21:04,991 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e12f9ab865384feab0754c6603cecb3e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1732332059863 2024-11-23T03:21:04,991 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 87fc89f0aa5f46c68a31378abc8670e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332061006 2024-11-23T03:21:04,991 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 414287f67d4445e589d0978ceb3d4120, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732332062167 2024-11-23T03:21:04,998 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4dfbb59dd7b53f05c99615119ca9b6f4#C#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:04,998 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/c3e0a7e85125448598216b1e1d00cb14 is 50, key is test_row_0/C:col10/1732332063296/Put/seqid=0 2024-11-23T03:21:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741947_1123 (size=13493) 2024-11-23T03:21:05,067 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8de610127b2f45e78209148171114ea7 2024-11-23T03:21:05,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3adffe86a93f4de88dd71518e5213cd6 is 50, key is test_row_0/B:col10/1732332063328/Put/seqid=0 2024-11-23T03:21:05,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741948_1124 (size=12301) 2024-11-23T03:21:05,408 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/c3e0a7e85125448598216b1e1d00cb14 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c3e0a7e85125448598216b1e1d00cb14 2024-11-23T03:21:05,413 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4dfbb59dd7b53f05c99615119ca9b6f4/C of 4dfbb59dd7b53f05c99615119ca9b6f4 into c3e0a7e85125448598216b1e1d00cb14(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:05,413 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:05,413 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4., storeName=4dfbb59dd7b53f05c99615119ca9b6f4/C, priority=13, startTime=1732332064562; duration=0sec 2024-11-23T03:21:05,414 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:05,414 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4dfbb59dd7b53f05c99615119ca9b6f4:C 2024-11-23T03:21:05,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:05,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. as already flushing 2024-11-23T03:21:05,456 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:61411 2024-11-23T03:21:05,456 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:05,480 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3adffe86a93f4de88dd71518e5213cd6 2024-11-23T03:21:05,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/b969e8858f3a4d549abbe53522ae5a74 is 50, key is test_row_0/C:col10/1732332063328/Put/seqid=0 2024-11-23T03:21:05,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741949_1125 (size=12301) 2024-11-23T03:21:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:05,892 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/b969e8858f3a4d549abbe53522ae5a74 2024-11-23T03:21:05,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/8de610127b2f45e78209148171114ea7 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8de610127b2f45e78209148171114ea7 2024-11-23T03:21:05,900 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8de610127b2f45e78209148171114ea7, entries=150, sequenceid=511, filesize=12.0 K 2024-11-23T03:21:05,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/3adffe86a93f4de88dd71518e5213cd6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3adffe86a93f4de88dd71518e5213cd6 2024-11-23T03:21:05,905 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3adffe86a93f4de88dd71518e5213cd6, entries=150, sequenceid=511, filesize=12.0 K 2024-11-23T03:21:05,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/b969e8858f3a4d549abbe53522ae5a74 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/b969e8858f3a4d549abbe53522ae5a74 2024-11-23T03:21:05,910 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/b969e8858f3a4d549abbe53522ae5a74, entries=150, sequenceid=511, filesize=12.0 K 2024-11-23T03:21:05,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=6.71 KB/6870 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 1254ms, sequenceid=511, compaction requested=false 2024-11-23T03:21:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
2024-11-23T03:21:05,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T03:21:05,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-23T03:21:05,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-23T03:21:05,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3270 sec 2024-11-23T03:21:05,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.3320 sec 2024-11-23T03:21:07,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T03:21:07,689 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-23T03:21:08,281 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T03:21:11,798 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T03:21:11,800 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T03:21:14,116 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:61411 2024-11-23T03:21:14,116 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:14,153 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:61411 2024-11-23T03:21:14,153 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:14,187 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:61411 2024-11-23T03:21:14,187 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:14,208 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:61411 2024-11-23T03:21:14,208 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:14,208 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T03:21:14,208 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 183 2024-11-23T03:21:14,208 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7429 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7342 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3220 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9653 rows 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3210 2024-11-23T03:21:14,209 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9629 rows 2024-11-23T03:21:14,209 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:21:14,209 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:61411 2024-11-23T03:21:14,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:14,212 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T03:21:14,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T03:21:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:14,224 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332074224"}]},"ts":"1732332074224"} 2024-11-23T03:21:14,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:14,225 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T03:21:14,227 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T03:21:14,228 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:21:14,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, UNASSIGN}] 2024-11-23T03:21:14,232 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, UNASSIGN 2024-11-23T03:21:14,233 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=4dfbb59dd7b53f05c99615119ca9b6f4, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:14,234 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:21:14,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:14,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:14,389 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:14,390 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:14,390 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:21:14,391 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 4dfbb59dd7b53f05c99615119ca9b6f4, disabling compactions & flushes 2024-11-23T03:21:14,391 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:14,391 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:14,391 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. after waiting 0 ms 2024-11-23T03:21:14,391 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 
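The DisableTableProcedure chain above (pid=30 with its CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children) is what the master expands a single client-side disable call into. A minimal sketch of that call, assuming cluster settings are picked up from the client's hbase-site.xml; the table name matches the test table in these entries:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DisableTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Blocks until the master's DisableTableProcedure and its region-close
                // subprocedures (as in the log entries above) have finished.
                admin.disableTable(table);
                // Once disabled, the table could be dropped with admin.deleteTable(table).
            }
        }
    }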
2024-11-23T03:21:14,391 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 4dfbb59dd7b53f05c99615119ca9b6f4 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=A 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=B 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4dfbb59dd7b53f05c99615119ca9b6f4, store=C 2024-11-23T03:21:14,392 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:14,396 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/5703a0769c9d493ab3c286e8dfe03285 is 50, key is test_row_0/A:col10/1732332074207/Put/seqid=0 2024-11-23T03:21:14,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741950_1126 (size=12301) 2024-11-23T03:21:14,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:14,801 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/5703a0769c9d493ab3c286e8dfe03285 2024-11-23T03:21:14,809 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/595c55ba83904809a55d109267e1e65e is 50, key is test_row_0/B:col10/1732332074207/Put/seqid=0 2024-11-23T03:21:14,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741951_1127 (size=12301) 2024-11-23T03:21:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:15,214 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 
{event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/595c55ba83904809a55d109267e1e65e 2024-11-23T03:21:15,221 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/cd5452bfec5243479c6ee84fc350feb7 is 50, key is test_row_0/C:col10/1732332074207/Put/seqid=0 2024-11-23T03:21:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741952_1128 (size=12301) 2024-11-23T03:21:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:15,626 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/cd5452bfec5243479c6ee84fc350feb7 2024-11-23T03:21:15,632 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/A/5703a0769c9d493ab3c286e8dfe03285 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/5703a0769c9d493ab3c286e8dfe03285 2024-11-23T03:21:15,636 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/5703a0769c9d493ab3c286e8dfe03285, entries=150, sequenceid=522, filesize=12.0 K 2024-11-23T03:21:15,637 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/B/595c55ba83904809a55d109267e1e65e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/595c55ba83904809a55d109267e1e65e 2024-11-23T03:21:15,642 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/595c55ba83904809a55d109267e1e65e, entries=150, sequenceid=522, filesize=12.0 K 2024-11-23T03:21:15,643 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/.tmp/C/cd5452bfec5243479c6ee84fc350feb7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cd5452bfec5243479c6ee84fc350feb7 2024-11-23T03:21:15,647 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cd5452bfec5243479c6ee84fc350feb7, entries=150, sequenceid=522, filesize=12.0 K 2024-11-23T03:21:15,648 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 4dfbb59dd7b53f05c99615119ca9b6f4 in 1257ms, sequenceid=522, compaction requested=true 2024-11-23T03:21:15,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/82f224aef2d0408bb31f44865479f20f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f0455fb3b41a4dc5a6cb795d85aed189, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/ae2cd3caaa0f471ab355abf49fe78280, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/176acb5738fa467c871427606e7b04da, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b6016a06c418448890e6c47f5e444545, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/6e5cc338d779432b893772ad1ea381f4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/cc4d78d63ac54126accda00e3e467259, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8246a7eea5d54bb58da25d95e1e56ebb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b0c81a9e7a0b4217bd09c8b514b8fec7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/64f5ca812c914252864f2ffdd758b4a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e] to archive 2024-11-23T03:21:15,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:21:15,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ef5d4ef47f94d1e8efd464a3a56e0f0 2024-11-23T03:21:15,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/4ad2a190fdd640ff8b181fecf3b5fa6a 2024-11-23T03:21:15,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/76eb3d0511cf4d53a6cd5f32701034a9 2024-11-23T03:21:15,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/82f224aef2d0408bb31f44865479f20f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/82f224aef2d0408bb31f44865479f20f 2024-11-23T03:21:15,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f5954b2605ab471d96ede3e8fd5d89f4 2024-11-23T03:21:15,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f0455fb3b41a4dc5a6cb795d85aed189 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/f0455fb3b41a4dc5a6cb795d85aed189 2024-11-23T03:21:15,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/38a69cc8d9734482acdd71124ccf9f7e 2024-11-23T03:21:15,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c8c6d747154e41849df6b6a82256b9fc 2024-11-23T03:21:15,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/03c349c5e7024a068e5d95d4573f6e9d 2024-11-23T03:21:15,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/ae2cd3caaa0f471ab355abf49fe78280 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/ae2cd3caaa0f471ab355abf49fe78280 2024-11-23T03:21:15,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5aa2cf65a414bdaa35efe916a9bb0b1 2024-11-23T03:21:15,673 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d5faa927d15c42b882f80b68d5680348 2024-11-23T03:21:15,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/11997243c06240498fe3a055a456d09b 2024-11-23T03:21:15,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/176acb5738fa467c871427606e7b04da to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/176acb5738fa467c871427606e7b04da 2024-11-23T03:21:15,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/56d027f43ba94659a0271c07b0691f1a 2024-11-23T03:21:15,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/c179245ee63f415eb8d1c3d9a89ec3b2 2024-11-23T03:21:15,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b6016a06c418448890e6c47f5e444545 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b6016a06c418448890e6c47f5e444545 2024-11-23T03:21:15,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/01d78a6e5441471389a141bf532d3e86 2024-11-23T03:21:15,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/7f60ed08588a4dada62cbf8dbe423b0d 2024-11-23T03:21:15,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/331a9849288c4b0396055baffc4d7297 2024-11-23T03:21:15,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/6e5cc338d779432b893772ad1ea381f4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/6e5cc338d779432b893772ad1ea381f4 2024-11-23T03:21:15,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8f2ac17472464e92a353e2de17401c73 2024-11-23T03:21:15,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/e84ee9882d084c52b7168805ef015f7a 2024-11-23T03:21:15,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/cc4d78d63ac54126accda00e3e467259 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/cc4d78d63ac54126accda00e3e467259 2024-11-23T03:21:15,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/751c8f4542bf47b1831ad663246af6ce 2024-11-23T03:21:15,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d8c1efcab5e74324944c844a2edac3fa 2024-11-23T03:21:15,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8246a7eea5d54bb58da25d95e1e56ebb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8246a7eea5d54bb58da25d95e1e56ebb 2024-11-23T03:21:15,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/942d4713e166483790d4a659d7303c15 2024-11-23T03:21:15,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b0c81a9e7a0b4217bd09c8b514b8fec7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/b0c81a9e7a0b4217bd09c8b514b8fec7 2024-11-23T03:21:15,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/506bb041ca1848de94cbbf88cb4d3255 2024-11-23T03:21:15,698 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/efae3a7fa84f46da8d2712c9916c6601 2024-11-23T03:21:15,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/62b4186e68dc4ea6b35f945637d932ba 2024-11-23T03:21:15,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/64f5ca812c914252864f2ffdd758b4a9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/64f5ca812c914252864f2ffdd758b4a9 2024-11-23T03:21:15,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/66bee1a81083408498ea40a9d71e7db4 2024-11-23T03:21:15,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/688246a2dc004be49396efa9c1ebaa8e 2024-11-23T03:21:15,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/16b77ce786384720b72f10474718526a, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0aa8c34383164e0f8705cbc088c19e14, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/5819b47b49024c34846d1d9c9ff06459, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ca3da6585904f968bb11e09884955e7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8cb37788d4ab493da12fe2c12520a340, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/eaca0463d8cf459caded2d80eab0954e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6f7fc01ae438444f999cb2f7111fde21, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0d6966d5ba944efaa08889d6e5e82b40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f27ea7e6cbc24833944e08e2e79cac78, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8abd6ed365464902b3be8ddbf762f7b9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761] to archive 2024-11-23T03:21:15,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
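The StoreCloser entries above and below record HFileArchiver moving each compacted store file from the region's data directory into the matching archive directory on the same HDFS. As an illustration only (not part of the test), the archived files could afterwards be listed with the standard Hadoop FileSystem API; the NameNode address and the column-family A archive path below are copied from the log.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // NameNode address and archive directory copied from the log lines above.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34981"), conf);
    Path archiveDir = new Path("/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417"
        + "/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A");
    // One entry is expected per "Archived from FileableStoreFile ... to ..." line above.
    for (FileStatus status : fs.listStatus(archiveDir)) {
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}
```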
2024-11-23T03:21:15,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/ce5f5634966a4a289a0dc6b7c05270f1 2024-11-23T03:21:15,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/cc76d1ee635c4652a45c924273d48b31 2024-11-23T03:21:15,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/16b77ce786384720b72f10474718526a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/16b77ce786384720b72f10474718526a 2024-11-23T03:21:15,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0545c36863674ff3994b9b7b3cb596d7 2024-11-23T03:21:15,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/33d1b166ad7a43b88d310aa2f61710e5 2024-11-23T03:21:15,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0aa8c34383164e0f8705cbc088c19e14 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0aa8c34383164e0f8705cbc088c19e14 2024-11-23T03:21:15,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/a1f7748647ac4445b4e441f75c60d6de 2024-11-23T03:21:15,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ee84687c93247c8bbde6cedf390936f 2024-11-23T03:21:15,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/5819b47b49024c34846d1d9c9ff06459 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/5819b47b49024c34846d1d9c9ff06459 2024-11-23T03:21:15,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/27fb17000dcb49d4be52d7437ed124e7 2024-11-23T03:21:15,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f13068eaac04428caf48e417d29f7a94 2024-11-23T03:21:15,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/59f9c8eee2ff4ba39c61ea9f43b8b1df 2024-11-23T03:21:15,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ca3da6585904f968bb11e09884955e7 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/4ca3da6585904f968bb11e09884955e7 2024-11-23T03:21:15,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3ddf872800f4483090a9fa9b8826b1f9 2024-11-23T03:21:15,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b8c27eb5cdbb477896838fad03c5516e 2024-11-23T03:21:15,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/887b3ee226ba4a838041c59ae78ab1a5 2024-11-23T03:21:15,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8cb37788d4ab493da12fe2c12520a340 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8cb37788d4ab493da12fe2c12520a340 2024-11-23T03:21:15,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c76e5caae0d4160993a0f01b9d10822 2024-11-23T03:21:15,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/22cad17295684cf3a6ab624c2e3586ad 2024-11-23T03:21:15,747 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/eaca0463d8cf459caded2d80eab0954e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/eaca0463d8cf459caded2d80eab0954e 2024-11-23T03:21:15,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3f458f015c724d4b9c3c6ef269f594b8 2024-11-23T03:21:15,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/86b4aadcea924f00a0fa3bfd63768cf1 2024-11-23T03:21:15,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6f7fc01ae438444f999cb2f7111fde21 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6f7fc01ae438444f999cb2f7111fde21 2024-11-23T03:21:15,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/7ecf281291e6409f924b9cbb247ae9b8 2024-11-23T03:21:15,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/963dfaf2c083450288f0e529c2cb30b8 2024-11-23T03:21:15,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0d6966d5ba944efaa08889d6e5e82b40 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0d6966d5ba944efaa08889d6e5e82b40 2024-11-23T03:21:15,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/6e22bacff76c4658aeeba2f251dcc191 2024-11-23T03:21:15,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/e763ea0e526347d3867d49f2e9496863 2024-11-23T03:21:15,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f27ea7e6cbc24833944e08e2e79cac78 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/f27ea7e6cbc24833944e08e2e79cac78 2024-11-23T03:21:15,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/d9170ac4137441bea2c022adf50e08ad 2024-11-23T03:21:15,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/042638efae104c298ffe512bde394526 2024-11-23T03:21:15,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8abd6ed365464902b3be8ddbf762f7b9 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/8abd6ed365464902b3be8ddbf762f7b9 2024-11-23T03:21:15,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/9da6002b707e4031b1406b902fe366d6 2024-11-23T03:21:15,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c61e5eb638145978f303ca9ac3d0d2e 2024-11-23T03:21:15,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/0c6dd402cbfd46e6b4fda359fc71b761 2024-11-23T03:21:15,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cb29de31d75f4bda8a2af0ee6b06f9f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ae178b8c324d43a1a50789f054bd7b13, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27657c7dc72d49f4826ba3568efc2a56, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1ebdcfe3f1214b1baf90173c800a10a0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/097670f156f749f3884f40f70d516c71, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/79cc6568b5164e9784090b50598682a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/9accee464d06490ea7043d7d414a5d9c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ac8e91edc9524a4faaa58f792a9a2194, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e12f9ab865384feab0754c6603cecb3e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120] to archive 2024-11-23T03:21:15,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:21:15,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/8e6ebc119faa4199996681960e48c530 2024-11-23T03:21:15,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/eac5cffc7e154faeacb8aa8c48d937fb 2024-11-23T03:21:15,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cb29de31d75f4bda8a2af0ee6b06f9f9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cb29de31d75f4bda8a2af0ee6b06f9f9 2024-11-23T03:21:15,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/fb715793c8f1438ea868148f81ce69f9 2024-11-23T03:21:15,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/10f8a37975214c0ab0855847a84af4f9 2024-11-23T03:21:15,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ae178b8c324d43a1a50789f054bd7b13 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ae178b8c324d43a1a50789f054bd7b13 2024-11-23T03:21:15,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/60c6c042c5a54a149e47133a9df7eaf5 2024-11-23T03:21:15,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/33ff9115b247486eae5b1224bb0e5af5 2024-11-23T03:21:15,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/24e206f24f17414fa8a8c45d19c1029a 2024-11-23T03:21:15,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27657c7dc72d49f4826ba3568efc2a56 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27657c7dc72d49f4826ba3568efc2a56 2024-11-23T03:21:15,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/d28d95cae5454630a89dac482e971ab1 2024-11-23T03:21:15,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1f3211fc15d7478da6e7abda2aa58297 2024-11-23T03:21:15,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1ebdcfe3f1214b1baf90173c800a10a0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/1ebdcfe3f1214b1baf90173c800a10a0 2024-11-23T03:21:15,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/27754ca6e8b140bfa4ff2d48e4fda9d7 2024-11-23T03:21:15,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/59deb629ad2546d7b8df6d3b642684cb 2024-11-23T03:21:15,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/374adeb2237e4264a44f8e79201c5a51 2024-11-23T03:21:15,791 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/097670f156f749f3884f40f70d516c71 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/097670f156f749f3884f40f70d516c71 2024-11-23T03:21:15,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/30d5425728204803a946ccd7b98cf1bc 2024-11-23T03:21:15,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/15b44c0879314c8d838c38890f8abc0d 2024-11-23T03:21:15,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/79cc6568b5164e9784090b50598682a8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/79cc6568b5164e9784090b50598682a8 2024-11-23T03:21:15,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c303347dbe11461f8d6932e53d616e5d 2024-11-23T03:21:15,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5dc6deb5cccc4395baba0eae8cc277d8 2024-11-23T03:21:15,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/9accee464d06490ea7043d7d414a5d9c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/9accee464d06490ea7043d7d414a5d9c 2024-11-23T03:21:15,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/bf91e5e501d0403c8406da1a7b5f9371 2024-11-23T03:21:15,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/7729b9c5c39c40c5850a77520ca44403 2024-11-23T03:21:15,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ac8e91edc9524a4faaa58f792a9a2194 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/ac8e91edc9524a4faaa58f792a9a2194 2024-11-23T03:21:15,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/2514ec632016400cbe11e74a265fadd8 2024-11-23T03:21:15,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/5fa96ffb5f784b6cbf29c312ed065d16 2024-11-23T03:21:15,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/dcd67e4b4f1a4a5a9a8d1409a0c9036f 2024-11-23T03:21:15,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/470c36c1d0464d3f8fa02a466b2d421b 2024-11-23T03:21:15,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/962425e0bc054e47b8b8f3e1411f30d2 2024-11-23T03:21:15,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e12f9ab865384feab0754c6603cecb3e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e12f9ab865384feab0754c6603cecb3e 2024-11-23T03:21:15,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/e95285a3d11b4c948621f59f3d0b2b63 2024-11-23T03:21:15,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/87fc89f0aa5f46c68a31378abc8670e8 2024-11-23T03:21:15,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/414287f67d4445e589d0978ceb3d4120 2024-11-23T03:21:15,818 DEBUG 
[RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/recovered.edits/525.seqid, newMaxSeqId=525, maxSeqId=1 2024-11-23T03:21:15,821 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4. 2024-11-23T03:21:15,821 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 4dfbb59dd7b53f05c99615119ca9b6f4: 2024-11-23T03:21:15,823 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:15,824 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=4dfbb59dd7b53f05c99615119ca9b6f4, regionState=CLOSED 2024-11-23T03:21:15,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-23T03:21:15,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 4dfbb59dd7b53f05c99615119ca9b6f4, server=0d51875c74df,34141,1732332039937 in 1.5910 sec 2024-11-23T03:21:15,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-23T03:21:15,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4dfbb59dd7b53f05c99615119ca9b6f4, UNASSIGN in 1.5950 sec 2024-11-23T03:21:15,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-23T03:21:15,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6010 sec 2024-11-23T03:21:15,832 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332075831"}]},"ts":"1732332075831"} 2024-11-23T03:21:15,833 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:21:15,835 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:21:15,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6170 sec 2024-11-23T03:21:16,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T03:21:16,329 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-23T03:21:16,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:21:16,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,337 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,338 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T03:21:16,341 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:16,345 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/recovered.edits] 2024-11-23T03:21:16,348 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/5703a0769c9d493ab3c286e8dfe03285 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/5703a0769c9d493ab3c286e8dfe03285 2024-11-23T03:21:16,350 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8de610127b2f45e78209148171114ea7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/8de610127b2f45e78209148171114ea7 2024-11-23T03:21:16,351 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d6a4d98a98204d13bbef15113ada43ac to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/A/d6a4d98a98204d13bbef15113ada43ac 2024-11-23T03:21:16,354 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3adffe86a93f4de88dd71518e5213cd6 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/3adffe86a93f4de88dd71518e5213cd6 2024-11-23T03:21:16,355 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/595c55ba83904809a55d109267e1e65e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/595c55ba83904809a55d109267e1e65e 2024-11-23T03:21:16,357 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b4a35ecd85bc4ea484a64186b2bb8968 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/B/b4a35ecd85bc4ea484a64186b2bb8968 2024-11-23T03:21:16,359 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/b969e8858f3a4d549abbe53522ae5a74 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/b969e8858f3a4d549abbe53522ae5a74 2024-11-23T03:21:16,361 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c3e0a7e85125448598216b1e1d00cb14 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/c3e0a7e85125448598216b1e1d00cb14 2024-11-23T03:21:16,362 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cd5452bfec5243479c6ee84fc350feb7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/C/cd5452bfec5243479c6ee84fc350feb7 2024-11-23T03:21:16,365 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/recovered.edits/525.seqid to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4/recovered.edits/525.seqid 2024-11-23T03:21:16,366 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/4dfbb59dd7b53f05c99615119ca9b6f4 2024-11-23T03:21:16,366 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:21:16,370 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T03:21:16,378 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:21:16,409 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T03:21:16,410 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,410 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:21:16,410 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332076410"}]},"ts":"9223372036854775807"} 2024-11-23T03:21:16,413 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:21:16,413 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4dfbb59dd7b53f05c99615119ca9b6f4, NAME => 'TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:21:16,413 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T03:21:16,413 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332076413"}]},"ts":"9223372036854775807"} 2024-11-23T03:21:16,415 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:21:16,417 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-11-23T03:21:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T03:21:16,440 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-23T03:21:16,450 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3f04ca9-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3f04ca9-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3f04ca9-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;0d51875c74df:34141-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/0d51875c74df:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3f04ca9-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1309737130_22 at /127.0.0.1:57584 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=451 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=246 (was 148) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4524 (was 5090) 2024-11-23T03:21:16,459 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=246, ProcessCount=11, AvailableMemoryMB=4524 2024-11-23T03:21:16,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T03:21:16,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:21:16,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:16,464 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:21:16,464 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:16,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-11-23T03:21:16,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:16,465 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:21:16,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741953_1129 (size=960) 2024-11-23T03:21:16,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:16,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:16,873 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:21:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741954_1130 (size=53) 2024-11-23T03:21:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 52f48ab0d0f5382239ab1e35c83b29ed, disabling compactions & flushes 2024-11-23T03:21:17,280 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. after waiting 0 ms 2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:17,280 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:17,280 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:17,281 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:21:17,282 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332077282"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332077282"}]},"ts":"1732332077282"} 2024-11-23T03:21:17,283 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T03:21:17,284 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:21:17,284 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332077284"}]},"ts":"1732332077284"} 2024-11-23T03:21:17,285 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:21:17,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, ASSIGN}] 2024-11-23T03:21:17,290 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, ASSIGN 2024-11-23T03:21:17,291 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:21:17,441 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:17,443 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:17,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:17,595 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:17,598 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:17,598 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:21:17,599 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,599 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:21:17,599 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,599 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,600 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,601 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:17,602 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName A 2024-11-23T03:21:17,602 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:17,602 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:17,602 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,603 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:17,604 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName B 2024-11-23T03:21:17,604 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:17,605 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:17,605 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,606 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:17,606 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName C 2024-11-23T03:21:17,606 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:17,607 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:17,607 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:17,607 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,608 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,609 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:21:17,610 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:17,612 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:21:17,612 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 52f48ab0d0f5382239ab1e35c83b29ed; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67240378, jitterRate=0.0019597113132476807}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:21:17,613 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:17,614 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., pid=37, masterSystemTime=1732332077595 2024-11-23T03:21:17,615 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:17,616 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:17,616 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:17,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-23T03:21:17,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 in 174 msec 2024-11-23T03:21:17,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-23T03:21:17,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, ASSIGN in 330 msec 2024-11-23T03:21:17,621 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:21:17,621 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332077621"}]},"ts":"1732332077621"} 2024-11-23T03:21:17,622 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:21:17,625 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:21:17,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1630 sec 2024-11-23T03:21:18,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-23T03:21:18,570 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-11-23T03:21:18,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04506927 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a9b9802 2024-11-23T03:21:18,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118b007e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:18,577 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:18,579 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36162, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:18,581 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:21:18,583 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36522, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:21:18,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T03:21:18,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:21:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:18,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741955_1131 (size=996) 2024-11-23T03:21:19,009 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T03:21:19,009 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T03:21:19,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:21:19,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, REOPEN/MOVE}] 2024-11-23T03:21:19,021 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, REOPEN/MOVE 2024-11-23T03:21:19,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,023 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:21:19,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:19,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,176 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,176 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:21:19,176 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 52f48ab0d0f5382239ab1e35c83b29ed, disabling compactions & flushes 2024-11-23T03:21:19,176 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,176 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,176 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. after waiting 0 ms 2024-11-23T03:21:19,176 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:19,180 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T03:21:19,180 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,181 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:19,181 WARN [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 52f48ab0d0f5382239ab1e35c83b29ed to self. 2024-11-23T03:21:19,182 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=CLOSED 2024-11-23T03:21:19,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-23T03:21:19,185 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, REOPEN/MOVE; state=CLOSED, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=true 2024-11-23T03:21:19,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 in 161 msec 2024-11-23T03:21:19,336 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:19,489 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,492 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:19,493 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:21:19,493 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,493 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:21:19,493 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,493 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,496 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,497 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:19,502 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName A 2024-11-23T03:21:19,504 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:19,505 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:19,505 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,506 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:19,506 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName B 2024-11-23T03:21:19,506 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:19,507 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:19,507 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,507 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:19,507 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f48ab0d0f5382239ab1e35c83b29ed columnFamilyName C 2024-11-23T03:21:19,507 DEBUG [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:19,508 INFO [StoreOpener-52f48ab0d0f5382239ab1e35c83b29ed-1 {}] regionserver.HStore(327): Store=52f48ab0d0f5382239ab1e35c83b29ed/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:19,508 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,509 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,510 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,511 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:21:19,513 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,513 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 52f48ab0d0f5382239ab1e35c83b29ed; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67138845, jitterRate=4.467517137527466E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:21:19,515 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:19,515 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., pid=42, masterSystemTime=1732332079489 2024-11-23T03:21:19,517 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,517 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:19,517 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=OPEN, openSeqNum=5, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-11-23T03:21:19,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 in 181 msec 2024-11-23T03:21:19,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-23T03:21:19,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, REOPEN/MOVE in 499 msec 2024-11-23T03:21:19,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-23T03:21:19,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 510 msec 2024-11-23T03:21:19,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 933 msec 2024-11-23T03:21:19,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-23T03:21:19,533 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7362d978 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cae6c5c 2024-11-23T03:21:19,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7d6279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-11-23T03:21:19,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-11-23T03:21:19,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,548 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 
127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-11-23T03:21:19,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,554 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-23T03:21:19,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-11-23T03:21:19,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,562 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-11-23T03:21:19,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-11-23T03:21:19,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-11-23T03:21:19,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:19,575 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:19,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-23T03:21:19,576 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:19,577 DEBUG [hconnection-0x14bed04e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,577 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:19,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:19,578 DEBUG [hconnection-0x1b7e183d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,578 DEBUG [hconnection-0x79bfd5bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,578 DEBUG [hconnection-0x34ae5286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,579 DEBUG [hconnection-0x3d4ff768-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,580 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,580 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,580 DEBUG [hconnection-0x61a943e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,580 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,580 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,580 DEBUG [hconnection-0x684dd52d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,581 DEBUG [hconnection-0x612c3cc7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,581 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-23T03:21:19,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:19,581 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,582 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:19,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:19,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:19,600 DEBUG [hconnection-0x2bccd049-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:19,602 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332139628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332139629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332139636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332139636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,643 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:19,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332139646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a7f681605d3044e891eadc596673c72f_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332079593/Put/seqid=0 2024-11-23T03:21:19,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:19,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741956_1132 (size=12154) 2024-11-23T03:21:19,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:19,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:19,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332139736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332139736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332139739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332139739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332139750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:19,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:19,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:19,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:19,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332139940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332139940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332139941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332139942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:19,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:19,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332139954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:20,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:20,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,091 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:20,096 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a7f681605d3044e891eadc596673c72f_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a7f681605d3044e891eadc596673c72f_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:20,097 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5b3c31e7d05f43dd94af6e43136ea960, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:20,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5b3c31e7d05f43dd94af6e43136ea960 is 175, key is test_row_0/A:col10/1732332079593/Put/seqid=0 2024-11-23T03:21:20,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741957_1133 (size=30955) 2024-11-23T03:21:20,113 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5b3c31e7d05f43dd94af6e43136ea960 2024-11-23T03:21:20,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/eccc971fd1624304bf0ce8db38b09053 is 50, key is test_row_0/B:col10/1732332079593/Put/seqid=0 2024-11-23T03:21:20,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741958_1134 (size=12001) 2024-11-23T03:21:20,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/eccc971fd1624304bf0ce8db38b09053 2024-11-23T03:21:20,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/fc8ded802a5b4c06bef99e0fd4c6828a is 50, key is test_row_0/C:col10/1732332079593/Put/seqid=0 2024-11-23T03:21:20,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:20,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:20,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:20,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:20,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741959_1135 (size=12001) 2024-11-23T03:21:20,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332140245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332140245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332140246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332140247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332140256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:20,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:20,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:20,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:20,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:20,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:20,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/fc8ded802a5b4c06bef99e0fd4c6828a 2024-11-23T03:21:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5b3c31e7d05f43dd94af6e43136ea960 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960 2024-11-23T03:21:20,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960, entries=150, sequenceid=15, filesize=30.2 K 2024-11-23T03:21:20,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/eccc971fd1624304bf0ce8db38b09053 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053 2024-11-23T03:21:20,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053, entries=150, sequenceid=15, filesize=11.7 K 2024-11-23T03:21:20,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/fc8ded802a5b4c06bef99e0fd4c6828a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a 2024-11-23T03:21:20,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a, entries=150, sequenceid=15, filesize=11.7 K 2024-11-23T03:21:20,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 52f48ab0d0f5382239ab1e35c83b29ed in 1043ms, sequenceid=15, compaction requested=false 2024-11-23T03:21:20,638 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T03:21:20,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:20,654 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,654 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-23T03:21:20,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:20,655 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:21:20,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:20,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:20,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:20,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232947793eba174adfac5bc6b8c16ad391_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332079610/Put/seqid=0 2024-11-23T03:21:20,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741960_1136 (size=12154) 2024-11-23T03:21:20,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:20,682 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232947793eba174adfac5bc6b8c16ad391_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232947793eba174adfac5bc6b8c16ad391_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:20,683 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/961f21045b354ebc9ad7b2482e0edc54, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:20,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/961f21045b354ebc9ad7b2482e0edc54 is 175, key is test_row_0/A:col10/1732332079610/Put/seqid=0 2024-11-23T03:21:20,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:20,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741961_1137 (size=30955) 2024-11-23T03:21:20,695 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/961f21045b354ebc9ad7b2482e0edc54 2024-11-23T03:21:20,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/9f1f48211ac94dfd8a452b0df78a68ad is 50, key is test_row_0/B:col10/1732332079610/Put/seqid=0 2024-11-23T03:21:20,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741962_1138 (size=12001) 2024-11-23T03:21:20,729 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/9f1f48211ac94dfd8a452b0df78a68ad 2024-11-23T03:21:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c322e0ae61c347afab01f581f92ae966 is 50, key is test_row_0/C:col10/1732332079610/Put/seqid=0 2024-11-23T03:21:20,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:20,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
as already flushing 2024-11-23T03:21:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741963_1139 (size=12001) 2024-11-23T03:21:20,764 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c322e0ae61c347afab01f581f92ae966 2024-11-23T03:21:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/961f21045b354ebc9ad7b2482e0edc54 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54 2024-11-23T03:21:20,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332140763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,784 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T03:21:20,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/9f1f48211ac94dfd8a452b0df78a68ad as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad 2024-11-23T03:21:20,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332140770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332140772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332140779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:20,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332140779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:20,796 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T03:21:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c322e0ae61c347afab01f581f92ae966 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966 2024-11-23T03:21:20,802 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T03:21:20,803 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 52f48ab0d0f5382239ab1e35c83b29ed in 148ms, sequenceid=41, compaction requested=false 2024-11-23T03:21:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:20,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-23T03:21:20,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-23T03:21:20,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-23T03:21:20,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2280 sec 2024-11-23T03:21:20,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.2350 sec 2024-11-23T03:21:20,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:20,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:21:20,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:20,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235b329d4f3b9246a192a94e3ebdc141fa_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:20,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741964_1140 (size=14594) 2024-11-23T03:21:20,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332140995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332140995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332140999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332141000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332141101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332141102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332141107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332141108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332141304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332141307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332141311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332141311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,366 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:21,366 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:21:21,371 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235b329d4f3b9246a192a94e3ebdc141fa_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b329d4f3b9246a192a94e3ebdc141fa_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:21,373 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/da6d5824f4d34d7cb3b156ba2dde985b, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:21,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/da6d5824f4d34d7cb3b156ba2dde985b is 175, key is test_row_0/A:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:21,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741965_1141 (size=39549) 2024-11-23T03:21:21,379 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/da6d5824f4d34d7cb3b156ba2dde985b 2024-11-23T03:21:21,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0757d071eb7e4fd6be53ed614387123b is 50, key is test_row_0/B:col10/1732332080771/Put/seqid=0 
2024-11-23T03:21:21,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741966_1142 (size=12001) 2024-11-23T03:21:21,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0757d071eb7e4fd6be53ed614387123b 2024-11-23T03:21:21,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c8b24a3491c4991a4cb01577bea73f0 is 50, key is test_row_0/C:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:21,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741967_1143 (size=12001) 2024-11-23T03:21:21,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332141607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332141612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332141616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332141616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-23T03:21:21,692 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-23T03:21:21,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:21,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-23T03:21:21,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:21,696 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:21,696 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:21,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:21,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:21,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332141781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:21,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c8b24a3491c4991a4cb01577bea73f0 2024-11-23T03:21:21,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/da6d5824f4d34d7cb3b156ba2dde985b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b 2024-11-23T03:21:21,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b, entries=200, sequenceid=56, filesize=38.6 K 2024-11-23T03:21:21,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0757d071eb7e4fd6be53ed614387123b as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b 2024-11-23T03:21:21,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:21,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T03:21:21,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:21,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:21,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:21,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:21,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:21,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:21,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b, entries=150, sequenceid=56, filesize=11.7 K 2024-11-23T03:21:21,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c8b24a3491c4991a4cb01577bea73f0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0 2024-11-23T03:21:21,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0, entries=150, sequenceid=56, filesize=11.7 K 2024-11-23T03:21:21,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 52f48ab0d0f5382239ab1e35c83b29ed in 938ms, sequenceid=56, compaction requested=true 2024-11-23T03:21:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:21,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:21,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:21,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:21,871 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:21,871 DEBUG 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:21,872 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:21,872 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=99.1 K 2024-11-23T03:21:21,872 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:21,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b] 2024-11-23T03:21:21,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b3c31e7d05f43dd94af6e43136ea960, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732332079588 2024-11-23T03:21:21,873 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:21,873 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:21,873 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:21,873 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.2 K 2024-11-23T03:21:21,873 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 961f21045b354ebc9ad7b2482e0edc54, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332079610 2024-11-23T03:21:21,874 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting eccc971fd1624304bf0ce8db38b09053, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732332079588 2024-11-23T03:21:21,874 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting da6d5824f4d34d7cb3b156ba2dde985b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:21,874 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f1f48211ac94dfd8a452b0df78a68ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332079610 2024-11-23T03:21:21,875 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0757d071eb7e4fd6be53ed614387123b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:21,886 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:21,890 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#124 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:21,890 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b1e9d8fb17cb4bd1b29c839f8696c1b1 is 50, key is test_row_0/B:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:21,890 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112305df539e5f794635aa766855de4db98b_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:21,893 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112305df539e5f794635aa766855de4db98b_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:21,893 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112305df539e5f794635aa766855de4db98b_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:21,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741969_1145 (size=4469) 2024-11-23T03:21:21,932 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#123 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:21,934 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/d6c673468cba437e862c21a26bdd6072 is 175, key is test_row_0/A:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:21,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741968_1144 (size=12104) 2024-11-23T03:21:21,963 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b1e9d8fb17cb4bd1b29c839f8696c1b1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b1e9d8fb17cb4bd1b29c839f8696c1b1 2024-11-23T03:21:21,971 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into b1e9d8fb17cb4bd1b29c839f8696c1b1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:21,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:21,971 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332081871; duration=0sec 2024-11-23T03:21:21,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:21,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:21,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:21,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741970_1146 (size=31058) 2024-11-23T03:21:21,973 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:21,974 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:21,974 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:21,974 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.2 K 2024-11-23T03:21:21,974 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fc8ded802a5b4c06bef99e0fd4c6828a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732332079588 2024-11-23T03:21:21,975 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c322e0ae61c347afab01f581f92ae966, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332079610 2024-11-23T03:21:21,975 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c8b24a3491c4991a4cb01577bea73f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:21,986 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:21,986 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f23e258e7a2f47c6a61977b58baa95b1 is 50, key is test_row_0/C:col10/1732332080771/Put/seqid=0 2024-11-23T03:21:21,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741971_1147 (size=12104) 2024-11-23T03:21:21,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:22,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,003 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f23e258e7a2f47c6a61977b58baa95b1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f23e258e7a2f47c6a61977b58baa95b1 2024-11-23T03:21:22,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T03:21:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:22,004 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:21:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:22,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:22,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:22,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:22,009 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into f23e258e7a2f47c6a61977b58baa95b1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:22,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:22,009 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332081871; duration=0sec 2024-11-23T03:21:22,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:22,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:22,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123999766c6cd774580bc6d9c6e37143558_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332080998/Put/seqid=0 2024-11-23T03:21:22,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741972_1148 (size=12154) 2024-11-23T03:21:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:22,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:22,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332142148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332142148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332142150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332142150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332142254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332142255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332142255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332142255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:22,381 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/d6c673468cba437e862c21a26bdd6072 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072 2024-11-23T03:21:22,389 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into d6c673468cba437e862c21a26bdd6072(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:22,389 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:22,389 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332081870; duration=0sec 2024-11-23T03:21:22,390 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:22,390 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:22,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:22,434 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123999766c6cd774580bc6d9c6e37143558_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123999766c6cd774580bc6d9c6e37143558_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:22,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e96314f48d464c27ae1ac1c8bcad2ae0, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:22,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e96314f48d464c27ae1ac1c8bcad2ae0 is 175, key is test_row_0/A:col10/1732332080998/Put/seqid=0 2024-11-23T03:21:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741973_1149 (size=30955) 2024-11-23T03:21:22,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332142459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332142460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332142460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332142461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332142764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332142764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332142766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332142766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:22,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:22,854 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e96314f48d464c27ae1ac1c8bcad2ae0 2024-11-23T03:21:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b264d2f3c5a24bbeb318c3e8ccb7257f is 50, key is test_row_0/B:col10/1732332080998/Put/seqid=0 2024-11-23T03:21:22,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741974_1150 (size=12001) 2024-11-23T03:21:22,906 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b264d2f3c5a24bbeb318c3e8ccb7257f 2024-11-23T03:21:22,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/daec93b6af77403180fe477cd07d79fe is 50, key is test_row_0/C:col10/1732332080998/Put/seqid=0 2024-11-23T03:21:22,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741975_1151 (size=12001) 2024-11-23T03:21:22,924 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/daec93b6af77403180fe477cd07d79fe 2024-11-23T03:21:22,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e96314f48d464c27ae1ac1c8bcad2ae0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0 2024-11-23T03:21:22,936 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0, entries=150, sequenceid=79, filesize=30.2 K 2024-11-23T03:21:22,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b264d2f3c5a24bbeb318c3e8ccb7257f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f 2024-11-23T03:21:22,945 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T03:21:22,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/daec93b6af77403180fe477cd07d79fe as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe 2024-11-23T03:21:22,961 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T03:21:22,963 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 52f48ab0d0f5382239ab1e35c83b29ed in 959ms, sequenceid=79, compaction requested=false 2024-11-23T03:21:22,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:22,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:22,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-23T03:21:22,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-23T03:21:22,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-23T03:21:22,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2680 sec 2024-11-23T03:21:22,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.2730 sec 2024-11-23T03:21:23,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:23,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:21:23,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:23,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:23,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:23,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231937d31b41f945b289519a834760ad64_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332083268/Put/seqid=0 
2024-11-23T03:21:23,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741976_1152 (size=17034) 2024-11-23T03:21:23,293 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:23,298 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231937d31b41f945b289519a834760ad64_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231937d31b41f945b289519a834760ad64_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332143294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332143294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,300 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5d9b40990bae42ac9c9ac86dcae636aa, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:23,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5d9b40990bae42ac9c9ac86dcae636aa is 175, key is test_row_0/A:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332143298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332143299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741977_1153 (size=48139) 2024-11-23T03:21:23,330 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5d9b40990bae42ac9c9ac86dcae636aa 2024-11-23T03:21:23,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/297ebe6726cb4beabf0f79a09a8224c5 is 50, key is test_row_0/B:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741978_1154 (size=12001) 2024-11-23T03:21:23,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/297ebe6726cb4beabf0f79a09a8224c5 2024-11-23T03:21:23,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/822843c48f4346328b4b3773590fae8b is 50, key is test_row_0/C:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741979_1155 (size=12001) 2024-11-23T03:21:23,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/822843c48f4346328b4b3773590fae8b 2024-11-23T03:21:23,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332143400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332143400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332143402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332143405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/5d9b40990bae42ac9c9ac86dcae636aa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa 2024-11-23T03:21:23,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa, entries=250, sequenceid=97, filesize=47.0 K 2024-11-23T03:21:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/297ebe6726cb4beabf0f79a09a8224c5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5 2024-11-23T03:21:23,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5, entries=150, sequenceid=97, filesize=11.7 K 2024-11-23T03:21:23,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/822843c48f4346328b4b3773590fae8b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b 2024-11-23T03:21:23,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b, entries=150, sequenceid=97, filesize=11.7 K 2024-11-23T03:21:23,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 52f48ab0d0f5382239ab1e35c83b29ed in 161ms, sequenceid=97, compaction requested=true 2024-11-23T03:21:23,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:23,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:23,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:23,431 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:23,431 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:23,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:23,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:23,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:23,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:23,433 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110152 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:23,433 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:23,433 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:23,433 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:23,433 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
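[Editorial aside, not part of the captured log.] The selections logged just above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are driven by the regionserver's standard compaction thresholds rather than anything specific to this test. A minimal sketch of the configuration keys involved follows; the key names are the standard HBase ones, but the values shown are common defaults used purely for illustration, not values read from this run.

    // Illustrative sketch only -- not from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on the number of files selected for one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which writes block until compaction catches up
        // (the "16 blocking" figure reported by SortedCompactionPolicy above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(conf.get("hbase.hstore.blockingStoreFiles"));
      }
    }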
2024-11-23T03:21:23,433 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:23,433 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b1e9d8fb17cb4bd1b29c839f8696c1b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.3 K 2024-11-23T03:21:23,433 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=107.6 K 2024-11-23T03:21:23,433 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:23,434 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa] 2024-11-23T03:21:23,434 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b1e9d8fb17cb4bd1b29c839f8696c1b1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:23,434 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6c673468cba437e862c21a26bdd6072, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:23,435 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b264d2f3c5a24bbeb318c3e8ccb7257f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332080994 2024-11-23T03:21:23,435 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e96314f48d464c27ae1ac1c8bcad2ae0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332080994 2024-11-23T03:21:23,436 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 297ebe6726cb4beabf0f79a09a8224c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082147 2024-11-23T03:21:23,436 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d9b40990bae42ac9c9ac86dcae636aa, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082140 2024-11-23T03:21:23,454 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:23,455 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0b35a673f8a94005855694bbc89eb6cf is 50, key is test_row_0/B:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,457 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:23,461 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123bf5f7709e0b64585a1bd47a12f2ae1f1_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:23,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741980_1156 (size=12207) 2024-11-23T03:21:23,464 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123bf5f7709e0b64585a1bd47a12f2ae1f1_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:23,464 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123bf5f7709e0b64585a1bd47a12f2ae1f1_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:23,472 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0b35a673f8a94005855694bbc89eb6cf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b35a673f8a94005855694bbc89eb6cf 2024-11-23T03:21:23,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741981_1157 (size=4469) 2024-11-23T03:21:23,478 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into 0b35a673f8a94005855694bbc89eb6cf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:23,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:23,478 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332083431; duration=0sec 2024-11-23T03:21:23,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:23,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:23,479 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:23,480 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#133 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:23,481 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/27bc3cee21694fbd9b714e879dd2657e is 175, key is test_row_0/A:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,486 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:23,486 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:23,486 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:23,487 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f23e258e7a2f47c6a61977b58baa95b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.3 K 2024-11-23T03:21:23,487 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f23e258e7a2f47c6a61977b58baa95b1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732332080771 2024-11-23T03:21:23,487 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting daec93b6af77403180fe477cd07d79fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332080994 2024-11-23T03:21:23,488 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 822843c48f4346328b4b3773590fae8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082147 2024-11-23T03:21:23,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741982_1158 (size=31161) 2024-11-23T03:21:23,508 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#134 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:23,509 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/620316b3248244f1836189af447c3fd4 is 50, key is test_row_0/C:col10/1732332083268/Put/seqid=0 2024-11-23T03:21:23,526 INFO [master/0d51875c74df:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T03:21:23,526 INFO [master/0d51875c74df:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-23T03:21:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741983_1159 (size=12207) 2024-11-23T03:21:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:23,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:21:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:23,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236b0b85616a6d4decadf387e28e6cfdf2_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332083292/Put/seqid=0 2024-11-23T03:21:23,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741984_1160 (size=17034) 2024-11-23T03:21:23,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332143653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332143654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332143656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332143657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332143757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332143758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332143760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332143760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332143795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,796 DEBUG [Thread-648 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4224 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:23,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T03:21:23,802 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-23T03:21:23,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:23,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-23T03:21:23,807 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:23,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:23,808 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:23,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:23,910 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/27bc3cee21694fbd9b714e879dd2657e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e 2024-11-23T03:21:23,922 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into 27bc3cee21694fbd9b714e879dd2657e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
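[Editorial aside, not part of the captured log.] The client-side trace earlier in this section (RpcRetryingCallerImpl: "tries=6, retries=16") shows that RegionTooBusyException is treated as retriable by the HBase client rather than surfaced immediately to the writer thread. A minimal sketch of that write path follows, using the public client API and the row/family/qualifier names that appear in this log; the retry values are set only to mirror the "retries=16" figure above and are illustrative, not a recommendation.

    // Illustrative sketch only -- not from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // mirrors "retries=16" in the trace
        conf.setLong("hbase.client.pause", 100);        // base backoff between retries, in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // put() blocks while the retrying caller backs off on RegionTooBusyException,
          // so transient memstore pressure shows up as latency rather than an error.
          table.put(put);
        }
      }
    }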
2024-11-23T03:21:23,922 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:23,922 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332083431; duration=0sec 2024-11-23T03:21:23,922 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:23,922 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:23,937 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/620316b3248244f1836189af447c3fd4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/620316b3248244f1836189af447c3fd4 2024-11-23T03:21:23,945 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into 620316b3248244f1836189af447c3fd4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:23,945 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:23,945 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332083431; duration=0sec 2024-11-23T03:21:23,945 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:23,946 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:23,959 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:23,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:23,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
as already flushing 2024-11-23T03:21:23,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:23,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332143959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
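[Editor's note] The WARN above is the region throttling writes once its memstore exceeds the (test-lowered) 512 K blocking limit: each put fails with RegionTooBusyException until the in-flight flush frees memory. The standard HBase client already retries this exception internally with backoff, so the loop below is only an illustrative, hand-rolled equivalent; the retry count, backoff values, and row contents are assumptions, not taken from the log.

```java
// Illustrative sketch, not from the log: retrying a put that is rejected with
// RegionTooBusyException while the region's memstore is over its limit.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutWithBackoff {
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;                       // assumed starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;                                 // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                // wait for the flush to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IllegalStateException("region stayed too busy after retries");
    }
  }

  static Put exampleRow() {
    // Hypothetical row shaped like the test's keys (test_row_0, family A, col10).
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}
```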
2024-11-23T03:21:23,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:23,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:23,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332143961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332143965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:23,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332143968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,042 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:24,047 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236b0b85616a6d4decadf387e28e6cfdf2_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b0b85616a6d4decadf387e28e6cfdf2_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:24,051 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/227601f6beb44e1db3f37b019f7d4891, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:24,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/227601f6beb44e1db3f37b019f7d4891 is 175, key is test_row_0/A:col10/1732332083292/Put/seqid=0 2024-11-23T03:21:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741985_1161 (size=48139) 2024-11-23T03:21:24,068 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/227601f6beb44e1db3f37b019f7d4891 2024-11-23T03:21:24,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/afa877c0de964d74b5dc86b5f4e77d40 is 50, key is test_row_0/B:col10/1732332083292/Put/seqid=0 2024-11-23T03:21:24,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 
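[Editor's note] pid=47 is the FlushTableProcedure driving this sequence, pid=48 its per-region FlushRegionProcedure child, and the repeated "Checking to see if procedure is done pid=47" lines are the master answering the caller's polling. From the client side the whole procedure is reached through a single Admin call; a minimal sketch follows, with connection setup assumed and only the table name taken from the log.

```java
// Illustrative sketch: the client-side call that results in the
// FlushTableProcedure (pid=47) and FlushRegionProcedure (pid=48) seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure as done; while a
      // flush is already running on the region, the sub-procedure is simply
      // re-dispatched, which is the retry pattern visible throughout this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```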
2024-11-23T03:21:24,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741986_1162 (size=12001) 2024-11-23T03:21:24,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332144262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332144266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:24,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332144272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332144272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:24,421 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:24,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/afa877c0de964d74b5dc86b5f4e77d40 2024-11-23T03:21:24,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/10b7483ed0ac43a2822cea64394ce81a is 50, key is test_row_0/C:col10/1732332083292/Put/seqid=0 2024-11-23T03:21:24,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741987_1163 (size=12001) 2024-11-23T03:21:24,576 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:24,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,729 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332144767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332144773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332144776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332144778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:24,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:24,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:24,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:24,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/10b7483ed0ac43a2822cea64394ce81a 2024-11-23T03:21:24,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/227601f6beb44e1db3f37b019f7d4891 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891 2024-11-23T03:21:24,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891, entries=250, sequenceid=121, filesize=47.0 K 2024-11-23T03:21:24,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/afa877c0de964d74b5dc86b5f4e77d40 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40 2024-11-23T03:21:24,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40, entries=150, sequenceid=121, filesize=11.7 K 2024-11-23T03:21:24,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/10b7483ed0ac43a2822cea64394ce81a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a 2024-11-23T03:21:24,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a, entries=150, sequenceid=121, filesize=11.7 K 2024-11-23T03:21:24,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 52f48ab0d0f5382239ab1e35c83b29ed in 1366ms, sequenceid=121, compaction requested=false 2024-11-23T03:21:24,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:25,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T03:21:25,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:25,035 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:21:25,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:25,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:25,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb60d114a9dd4dda878704f3789a947c_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332083655/Put/seqid=0 2024-11-23T03:21:25,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741988_1164 (size=12304) 2024-11-23T03:21:25,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:25,455 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb60d114a9dd4dda878704f3789a947c_52f48ab0d0f5382239ab1e35c83b29ed to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb60d114a9dd4dda878704f3789a947c_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:25,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/a3a74f8d64364cc09a04612aa4e849f2, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:25,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/a3a74f8d64364cc09a04612aa4e849f2 is 175, key is test_row_0/A:col10/1732332083655/Put/seqid=0 2024-11-23T03:21:25,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741989_1165 (size=31105) 2024-11-23T03:21:25,478 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/a3a74f8d64364cc09a04612aa4e849f2 2024-11-23T03:21:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/94451afe46214eee9880f0a4ef133185 is 50, key is test_row_0/B:col10/1732332083655/Put/seqid=0 2024-11-23T03:21:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741990_1166 (size=12151) 2024-11-23T03:21:25,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:25,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:25,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332145807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332145810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332145810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332145809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,907 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/94451afe46214eee9880f0a4ef133185 2024-11-23T03:21:25,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332145912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:25,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332145914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332145914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1305eb4c54ff48229091943f43386338 is 50, key is test_row_0/C:col10/1732332083655/Put/seqid=0 2024-11-23T03:21:25,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332145915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:25,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741991_1167 (size=12151) 2024-11-23T03:21:26,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332146115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332146116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332146117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332146118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,321 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1305eb4c54ff48229091943f43386338 2024-11-23T03:21:26,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/a3a74f8d64364cc09a04612aa4e849f2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2 2024-11-23T03:21:26,332 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2, entries=150, sequenceid=137, filesize=30.4 K 2024-11-23T03:21:26,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/94451afe46214eee9880f0a4ef133185 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185 2024-11-23T03:21:26,338 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185, entries=150, sequenceid=137, filesize=11.9 K 2024-11-23T03:21:26,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1305eb4c54ff48229091943f43386338 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338 2024-11-23T03:21:26,344 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338, entries=150, sequenceid=137, filesize=11.9 K 2024-11-23T03:21:26,345 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 52f48ab0d0f5382239ab1e35c83b29ed in 1310ms, sequenceid=137, compaction requested=true 2024-11-23T03:21:26,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:26,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:26,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-23T03:21:26,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-23T03:21:26,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-23T03:21:26,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5390 sec 2024-11-23T03:21:26,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.5440 sec 2024-11-23T03:21:26,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:26,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:26,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:26,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332146431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332146431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332146435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332146436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c80c661b4a845629b47654177715244_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:26,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741992_1168 (size=17284) 2024-11-23T03:21:26,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332146536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332146537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332146539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332146539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332146743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332146743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332146743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332146744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:26,854 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:26,859 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c80c661b4a845629b47654177715244_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c80c661b4a845629b47654177715244_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:26,860 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/309ac007588048f9a1427dedfc23bf79, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:26,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/309ac007588048f9a1427dedfc23bf79 is 175, key is test_row_0/A:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:26,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741993_1169 (size=48389) 2024-11-23T03:21:27,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332147049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332147049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332147050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332147050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,266 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/309ac007588048f9a1427dedfc23bf79 2024-11-23T03:21:27,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b246801bae7243a7b78feea845ef76d7 is 50, key is test_row_0/B:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:27,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741994_1170 (size=12151) 2024-11-23T03:21:27,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332147554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332147554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332147558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332147558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b246801bae7243a7b78feea845ef76d7 2024-11-23T03:21:27,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f33ea53725f14c60928139ce5817f76c is 50, key is test_row_0/C:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:27,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741995_1171 (size=12151) 2024-11-23T03:21:27,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:27,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332147806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:27,808 DEBUG [Thread-648 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8236 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T03:21:27,915 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-23T03:21:27,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-23T03:21:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:27,918 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:27,918 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:27,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:28,070 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T03:21:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:28,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
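
The client-side trace above (RpcRetryingCallerImpl: tries=7, retries=16, started=8236 ms ago) shows the AcidGuarantees writer thread retrying while the region keeps rejecting Mutate calls with RegionTooBusyException. Below is a minimal sketch of the same backoff pattern written explicitly in application code, assuming a standard HBase 2.x client; the retry constants and the value written are illustrative, and in practice the client's built-in retrying may instead surface the failure wrapped in a RetriesExhaustedException.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  // Illustrative bounds; the built-in RpcRetryingCaller already retries with backoff.
  private static final int MAX_ATTEMPTS = 5;
  private static final long BASE_PAUSE_MS = 100;

  public static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);            // may fail while the region is over its memstore blocking limit
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= MAX_ATTEMPTS) {
          throw e;                 // give up after a bounded number of attempts
        }
        Thread.sleep(BASE_PAUSE_MS * attempt);  // linear backoff before retrying
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }
}
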
2024-11-23T03:21:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:28,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:28,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f33ea53725f14c60928139ce5817f76c 2024-11-23T03:21:28,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/309ac007588048f9a1427dedfc23bf79 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79 2024-11-23T03:21:28,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79, entries=250, sequenceid=161, filesize=47.3 K 2024-11-23T03:21:28,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b246801bae7243a7b78feea845ef76d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7 2024-11-23T03:21:28,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7, entries=150, sequenceid=161, filesize=11.9 K 2024-11-23T03:21:28,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f33ea53725f14c60928139ce5817f76c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c 2024-11-23T03:21:28,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c, entries=150, sequenceid=161, filesize=11.9 K 2024-11-23T03:21:28,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 52f48ab0d0f5382239ab1e35c83b29ed in 1744ms, sequenceid=161, compaction requested=true 2024-11-23T03:21:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:28,166 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:28,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:28,167 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:28,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:28,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:28,169 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 158794 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:28,169 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:28,169 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
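
The flush that finishes above (dataSize ~140.89 KB in 1744ms, sequenceid=161, compaction requested=true) is what eventually clears the "Over memstore limit=512.0 K" condition behind the earlier RegionTooBusyException storm. In a default setup that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so a 512 K limit suggests a deliberately small flush size. A sketch of such a configuration follows, with illustrative values (128 K x 4 = 512 K) rather than whatever this test actually sets.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConf {
  /** Returns a configuration whose region memstore blocks writers at roughly 512 K. */
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches ~128 K (the stock default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writers get RegionTooBusyException once the memstore exceeds flush.size * multiplier
    // (128 K * 4 = 512 K, matching the limit reported in the exceptions above).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
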
2024-11-23T03:21:28,169 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=155.1 K 2024-11-23T03:21:28,169 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:28,169 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79] 2024-11-23T03:21:28,169 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:28,169 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:28,170 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
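
Store A above is flushed and compacted through DefaultMobStoreFlusher/DefaultMobStoreCompactor and writes temporary files under mobdir, i.e. column family A is MOB-enabled in this run (the compactor later aborts its MOB writer because every cell is below the threshold). A sketch of declaring such a family at table-creation time, assuming the HBase 2.x descriptor builders; the 100 K threshold and the plain B and C families are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSetup {
  public static void createMobTable(Admin admin) throws IOException {
    // Family "A" stores large values as MOB files under /mobdir instead of inline in HFiles.
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024L)   // cells above ~100 K go to MOB storage (illustrative)
        .build();

    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build());
  }
}
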
2024-11-23T03:21:28,170 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b35a673f8a94005855694bbc89eb6cf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=47.4 K 2024-11-23T03:21:28,170 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27bc3cee21694fbd9b714e879dd2657e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082147 2024-11-23T03:21:28,170 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b35a673f8a94005855694bbc89eb6cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082147 2024-11-23T03:21:28,171 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 227601f6beb44e1db3f37b019f7d4891, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732332083292 2024-11-23T03:21:28,171 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting afa877c0de964d74b5dc86b5f4e77d40, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732332083292 2024-11-23T03:21:28,171 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3a74f8d64364cc09a04612aa4e849f2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732332083652 2024-11-23T03:21:28,172 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 94451afe46214eee9880f0a4ef133185, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732332083652 2024-11-23T03:21:28,172 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 309ac007588048f9a1427dedfc23bf79, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805 2024-11-23T03:21:28,172 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b246801bae7243a7b78feea845ef76d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805 2024-11-23T03:21:28,207 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:28,210 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#145 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:28,211 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/dbd7a27dc2224ad58fcf2636abfefd59 is 50, key is test_row_0/B:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:28,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:28,221 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411238e86ce668ea94372abacc27cf2792a10_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:28,224 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411238e86ce668ea94372abacc27cf2792a10_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:28,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,224 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238e86ce668ea94372abacc27cf2792a10_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:28,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T03:21:28,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:28,225 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T03:21:28,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:28,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:28,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:28,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:28,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:28,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:28,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741996_1172 (size=12493) 2024-11-23T03:21:28,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232a3f1ee3a7e14e759a1bb0ade99ba2d2_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332086434/Put/seqid=0 2024-11-23T03:21:28,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741997_1173 (size=4469) 2024-11-23T03:21:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741998_1174 (size=12304) 2024-11-23T03:21:28,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:28,315 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232a3f1ee3a7e14e759a1bb0ade99ba2d2_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a3f1ee3a7e14e759a1bb0ade99ba2d2_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:28,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/7074f692e15c41758febdebd457626d4, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:28,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/7074f692e15c41758febdebd457626d4 is 175, key is test_row_0/A:col10/1732332086434/Put/seqid=0 2024-11-23T03:21:28,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741999_1175 (size=31105) 2024-11-23T03:21:28,341 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/7074f692e15c41758febdebd457626d4 2024-11-23T03:21:28,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/30dcc965d2a64f1d8875e7e5d08d965d is 50, key is test_row_0/B:col10/1732332086434/Put/seqid=0 2024-11-23T03:21:28,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742000_1176 (size=12151) 2024-11-23T03:21:28,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:28,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:28,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:28,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332148609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332148612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332148613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332148614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,676 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/dbd7a27dc2224ad58fcf2636abfefd59 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/dbd7a27dc2224ad58fcf2636abfefd59 2024-11-23T03:21:28,681 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into dbd7a27dc2224ad58fcf2636abfefd59(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:28,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:28,681 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=12, startTime=1732332088166; duration=0sec 2024-11-23T03:21:28,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:28,682 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:28,682 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:28,683 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:28,683 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:28,683 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:28,683 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/620316b3248244f1836189af447c3fd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=47.4 K 2024-11-23T03:21:28,684 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 620316b3248244f1836189af447c3fd4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732332082147 2024-11-23T03:21:28,684 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 10b7483ed0ac43a2822cea64394ce81a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732332083292 2024-11-23T03:21:28,685 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1305eb4c54ff48229091943f43386338, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=137, earliestPutTs=1732332083652 2024-11-23T03:21:28,685 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f33ea53725f14c60928139ce5817f76c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805 2024-11-23T03:21:28,695 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#148 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:28,695 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/66a7d349f05143a5a311c65fc531608b is 50, key is test_row_0/C:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:28,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742001_1177 (size=12493) 2024-11-23T03:21:28,705 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/66a7d349f05143a5a311c65fc531608b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/66a7d349f05143a5a311c65fc531608b 2024-11-23T03:21:28,705 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#144 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:28,706 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9a6565ae7ed2474aabfd500756fa33ec is 175, key is test_row_0/A:col10/1732332086420/Put/seqid=0 2024-11-23T03:21:28,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into 66a7d349f05143a5a311c65fc531608b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:28,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:28,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=12, startTime=1732332088167; duration=0sec 2024-11-23T03:21:28,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:28,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:28,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332148715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742002_1178 (size=31447) 2024-11-23T03:21:28,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332148718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,730 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9a6565ae7ed2474aabfd500756fa33ec as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec 2024-11-23T03:21:28,737 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into 9a6565ae7ed2474aabfd500756fa33ec(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:28,737 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:28,737 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=12, startTime=1732332088166; duration=0sec 2024-11-23T03:21:28,737 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:28,737 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:28,762 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/30dcc965d2a64f1d8875e7e5d08d965d 2024-11-23T03:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/14160160a9ab4e39a73587464b544178 is 50, key is test_row_0/C:col10/1732332086434/Put/seqid=0 2024-11-23T03:21:28,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742003_1179 (size=12151) 2024-11-23T03:21:28,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332148917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332148919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332148920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:28,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:28,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332148921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:29,184 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/14160160a9ab4e39a73587464b544178 2024-11-23T03:21:29,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/7074f692e15c41758febdebd457626d4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4 2024-11-23T03:21:29,195 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4, entries=150, sequenceid=174, filesize=30.4 K 2024-11-23T03:21:29,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/30dcc965d2a64f1d8875e7e5d08d965d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d 2024-11-23T03:21:29,201 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T03:21:29,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/14160160a9ab4e39a73587464b544178 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178 2024-11-23T03:21:29,214 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T03:21:29,215 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 52f48ab0d0f5382239ab1e35c83b29ed in 990ms, sequenceid=174, compaction requested=false 2024-11-23T03:21:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:29,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-23T03:21:29,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-23T03:21:29,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-23T03:21:29,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2980 sec 2024-11-23T03:21:29,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.3020 sec 2024-11-23T03:21:29,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:29,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:29,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332149234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f63ce3b78d5047ef8eb6d0b390d29953_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:29,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332149236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332149237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332149238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742004_1180 (size=12304) 2024-11-23T03:21:29,243 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:29,247 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f63ce3b78d5047ef8eb6d0b390d29953_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f63ce3b78d5047ef8eb6d0b390d29953_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:29,248 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f71b3cc8a35446a6a023d777c3f8917f, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:29,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f71b3cc8a35446a6a023d777c3f8917f is 175, key is test_row_0/A:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:29,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742005_1181 (size=31105) 2024-11-23T03:21:29,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332149337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332149341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332149341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332149341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332149540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332149543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332149544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332149545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,658 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=203, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f71b3cc8a35446a6a023d777c3f8917f 2024-11-23T03:21:29,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/3cf7164106e2410bb7a4f9771a6ab39a is 50, key is test_row_0/B:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:29,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742006_1182 (size=12151) 2024-11-23T03:21:29,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332149845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332149847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332149849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:29,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332149850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T03:21:30,022 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-23T03:21:30,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-23T03:21:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:30,025 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:30,026 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:30,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:30,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/3cf7164106e2410bb7a4f9771a6ab39a 2024-11-23T03:21:30,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5644ecf3d78d41f18a8bf8c34b47f821 is 50, key is test_row_0/C:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:30,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742007_1183 (size=12151) 
2024-11-23T03:21:30,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:30,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T03:21:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:30,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:30,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T03:21:30,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:30,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:30,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332150349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332150352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332150353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332150353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T03:21:30,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:30,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:30,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:30,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5644ecf3d78d41f18a8bf8c34b47f821 2024-11-23T03:21:30,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f71b3cc8a35446a6a023d777c3f8917f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f 2024-11-23T03:21:30,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f, entries=150, sequenceid=203, filesize=30.4 K 2024-11-23T03:21:30,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/3cf7164106e2410bb7a4f9771a6ab39a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a 2024-11-23T03:21:30,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a, entries=150, sequenceid=203, filesize=11.9 K 2024-11-23T03:21:30,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5644ecf3d78d41f18a8bf8c34b47f821 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821 2024-11-23T03:21:30,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821, entries=150, sequenceid=203, filesize=11.9 K 2024-11-23T03:21:30,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 52f48ab0d0f5382239ab1e35c83b29ed in 1310ms, sequenceid=203, compaction requested=true 2024-11-23T03:21:30,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:30,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1
2024-11-23T03:21:30,534 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T03:21:30,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T03:21:30,534 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T03:21:30,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2
2024-11-23T03:21:30,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T03:21:30,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3
2024-11-23T03:21:30,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files)
2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files)
2024-11-23T03:21:30,537 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.
2024-11-23T03:21:30,537 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.
2024-11-23T03:21:30,537 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/dbd7a27dc2224ad58fcf2636abfefd59, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.9 K 2024-11-23T03:21:30,537 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=91.5 K 2024-11-23T03:21:30,537 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f]
2024-11-23T03:21:30,537 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting dbd7a27dc2224ad58fcf2636abfefd59, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805
2024-11-23T03:21:30,538 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a6565ae7ed2474aabfd500756fa33ec, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805
2024-11-23T03:21:30,538 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 30dcc965d2a64f1d8875e7e5d08d965d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732332086430
2024-11-23T03:21:30,538 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7074f692e15c41758febdebd457626d4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732332086430
2024-11-23T03:21:30,538 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cf7164106e2410bb7a4f9771a6ab39a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611
2024-11-23T03:21:30,539 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f71b3cc8a35446a6a023d777c3f8917f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611
2024-11-23T03:21:30,546 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed]
2024-11-23T03:21:30,547 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms.
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:30,548 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/13cd6812049a4754911bfec282381665 is 50, key is test_row_0/B:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:30,548 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123cdcaaf036e99475782a204329806d892_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:30,550 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123cdcaaf036e99475782a204329806d892_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:30,550 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cdcaaf036e99475782a204329806d892_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:30,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742008_1184 (size=12595) 2024-11-23T03:21:30,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742009_1185 (size=4469) 2024-11-23T03:21:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:30,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:30,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T03:21:30,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:30,637 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-23T03:21:30,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A
2024-11-23T03:21:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:21:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B
2024-11-23T03:21:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:21:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C
2024-11-23T03:21:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:21:30,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230fb5567849f649eaacd6649ae90e101f_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332089234/Put/seqid=0
2024-11-23T03:21:30,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742010_1186 (size=12304)
2024-11-23T03:21:30,960 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#154 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:30,960 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/6d24da886d8a4fe39e996290af85c4a0 is 175, key is test_row_0/A:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:30,961 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/13cd6812049a4754911bfec282381665 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/13cd6812049a4754911bfec282381665 2024-11-23T03:21:30,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742011_1187 (size=31549) 2024-11-23T03:21:30,968 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into 13cd6812049a4754911bfec282381665(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:30,968 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:30,968 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332090534; duration=0sec 2024-11-23T03:21:30,968 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:30,968 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:30,968 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:30,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:30,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:30,971 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:30,971 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/66a7d349f05143a5a311c65fc531608b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=35.9 K
2024-11-23T03:21:30,971 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 66a7d349f05143a5a311c65fc531608b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732332085805
2024-11-23T03:21:30,972 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 14160160a9ab4e39a73587464b544178, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732332086430
2024-11-23T03:21:30,972 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5644ecf3d78d41f18a8bf8c34b47f821, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611
2024-11-23T03:21:30,980 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:30,981 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c10a2b184c2749e0b342366920f18a58 is 50, key is test_row_0/C:col10/1732332089224/Put/seqid=0 2024-11-23T03:21:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742012_1188 (size=12595) 2024-11-23T03:21:31,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:31,056 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230fb5567849f649eaacd6649ae90e101f_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230fb5567849f649eaacd6649ae90e101f_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:31,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/15a3fceac3be40cb9fcf16ba3b53989a, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:31,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/15a3fceac3be40cb9fcf16ba3b53989a is 175, key is test_row_0/A:col10/1732332089234/Put/seqid=0 2024-11-23T03:21:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742013_1189 (size=31105) 2024-11-23T03:21:31,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:31,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
as already flushing 2024-11-23T03:21:31,374 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/6d24da886d8a4fe39e996290af85c4a0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0 2024-11-23T03:21:31,380 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into 6d24da886d8a4fe39e996290af85c4a0(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:31,380 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:31,380 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332090534; duration=0sec 2024-11-23T03:21:31,381 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:31,381 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:31,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332151387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332151389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332151390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332151390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,395 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c10a2b184c2749e0b342366920f18a58 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c10a2b184c2749e0b342366920f18a58 2024-11-23T03:21:31,403 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into c10a2b184c2749e0b342366920f18a58(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:31,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:31,403 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332090535; duration=0sec 2024-11-23T03:21:31,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:31,403 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:31,463 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/15a3fceac3be40cb9fcf16ba3b53989a 2024-11-23T03:21:31,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/4fb71422a8fb48dfa23948fa8518feb0 is 50, key is test_row_0/B:col10/1732332089234/Put/seqid=0 2024-11-23T03:21:31,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742014_1190 (size=12151) 2024-11-23T03:21:31,493 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/4fb71422a8fb48dfa23948fa8518feb0 2024-11-23T03:21:31,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332151494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332151495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332151495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332151497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/21a523a1e83e459cb582abdebc853d47 is 50, key is test_row_0/C:col10/1732332089234/Put/seqid=0 2024-11-23T03:21:31,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742015_1191 (size=12151) 2024-11-23T03:21:31,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332151697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332151698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332151698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332151699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:31,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/21a523a1e83e459cb582abdebc853d47 2024-11-23T03:21:31,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/15a3fceac3be40cb9fcf16ba3b53989a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a 2024-11-23T03:21:31,922 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a, entries=150, sequenceid=214, filesize=30.4 K 2024-11-23T03:21:31,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/4fb71422a8fb48dfa23948fa8518feb0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0 2024-11-23T03:21:31,929 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0, entries=150, sequenceid=214, filesize=11.9 K 2024-11-23T03:21:31,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/21a523a1e83e459cb582abdebc853d47 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47
2024-11-23T03:21:31,934 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47, entries=150, sequenceid=214, filesize=11.9 K
2024-11-23T03:21:31,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 52f48ab0d0f5382239ab1e35c83b29ed in 1298ms, sequenceid=214, compaction requested=false
2024-11-23T03:21:31,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed:
2024-11-23T03:21:31,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.
2024-11-23T03:21:31,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52
2024-11-23T03:21:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=52
2024-11-23T03:21:31,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51
2024-11-23T03:21:31,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9110 sec
2024-11-23T03:21:31,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.9150 sec
2024-11-23T03:21:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed
2024-11-23T03:21:32,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-11-23T03:21:32,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A
2024-11-23T03:21:32,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:21:32,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B
2024-11-23T03:21:32,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T03:21:32,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:32,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:32,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332152005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332152005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332152006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c06935d8b269463bbbe2460dd5ac1715_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:32,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332152007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742016_1192 (size=14794) 2024-11-23T03:21:32,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332152108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332152109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332152110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332152112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T03:21:32,129 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-23T03:21:32,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-23T03:21:32,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:32,134 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:32,135 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:32,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:32,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:32,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:32,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:32,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:32,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
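The pid=54 failures above are contention rather than data loss: the master's FlushRegionProcedure reaches the region while MemStoreFlusher.0 is already flushing it, so FlushRegionCallable reports "Unable to complete flush ... as already flushing" and the master keeps re-dispatching the callable until the region can accept a new flush. For orientation only, a minimal sketch of requesting such a table flush through the public Admin API; the connection setup is illustrative and not taken from this test's source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // On this build the master runs this as a FlushTableProcedure with per-region
            // subprocedures, and the client waits for completion (cf. the
            // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed" entry).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```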
2024-11-23T03:21:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332152311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332152311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332152314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332152315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,431 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:32,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:32,436 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c06935d8b269463bbbe2460dd5ac1715_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c06935d8b269463bbbe2460dd5ac1715_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:32,437 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/17115b99fc94494483f899fea1ca72ea, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:32,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/17115b99fc94494483f899fea1ca72ea is 175, key is test_row_0/A:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:32,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:32,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
as already flushing 2024-11-23T03:21:32,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742017_1193 (size=39749) 2024-11-23T03:21:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,443 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/17115b99fc94494483f899fea1ca72ea 2024-11-23T03:21:32,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/665333ca9547478784780b4e4a5967b3 is 50, key is test_row_0/B:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:32,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742018_1194 (size=12151) 2024-11-23T03:21:32,593 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:32,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:32,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:32,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
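The mobdir activity just above (DefaultMobStoreFlusher writing a mob file and HMobStore renaming it into mobdir/data/default/TestAcidGuarantees/.../A/) suggests that column family A is MOB-enabled, so its cells are flushed into separate mob files while the regular store file keeps only references. A hedged sketch of how such a family is declared with the descriptor builder API; the threshold value is illustrative, not the test's actual setting.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    public static void main(String[] args) {
        // Declare column family 'A' as MOB-enabled; cells larger than the threshold
        // (in bytes) are written to mob files under the cluster's mobdir, as seen in
        // the paths above.
        ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(10L) // illustrative threshold, not from the test
                .build();
        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(family)
                .build();
        System.out.println(table);
    }
}
```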
2024-11-23T03:21:32,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332152613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332152615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332152619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332152619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:32,746 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:32,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:32,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
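The recurring RegionTooBusyException warnings above are server-side backpressure from HRegion.checkResources: while the region's memstore is over its blocking limit, Mutate calls are rejected and clients are expected to retry once the flush drains the memstore. The standard HBase client already retries this internally with backoff, so the sketch below only makes the backpressure visible; it assumes the exception reaches the caller directly, and the row and value contents are hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put); // rejected with RegionTooBusyException while the memstore is over its limit
                    break;
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 5) {
                        throw busy; // give up after a few attempts in this sketch
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff before retrying
                }
            }
        }
    }
}
```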
2024-11-23T03:21:32,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/665333ca9547478784780b4e4a5967b3 2024-11-23T03:21:32,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/af59e2486c6e4df6a3fa6aeb5ace1db9 is 50, key is test_row_0/C:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:32,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742019_1195 (size=12151) 2024-11-23T03:21:32,899 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:32,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:32,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:32,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:32,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
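The 512.0 K figure in those warnings is the region's blocking limit, that is, the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; such a small value suggests the test lowers the flush size to force frequent flushes. The snippet below is a hypothetical illustration of the two settings involved; the values are examples chosen to reproduce 512 K, not the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: a 128 KB flush size with the default multiplier of 4
        // yields the 512 K blocking limit reported in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512 K
    }
}
```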
2024-11-23T03:21:32,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:32,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:33,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:33,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:33,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332153120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332153121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332153122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332153123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:33,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:33,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:33,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:33,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:33,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/af59e2486c6e4df6a3fa6aeb5ace1db9 2024-11-23T03:21:33,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/17115b99fc94494483f899fea1ca72ea as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea 2024-11-23T03:21:33,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea, entries=200, sequenceid=244, filesize=38.8 K 2024-11-23T03:21:33,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/665333ca9547478784780b4e4a5967b3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3 2024-11-23T03:21:33,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3, entries=150, sequenceid=244, filesize=11.9 K 2024-11-23T03:21:33,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/af59e2486c6e4df6a3fa6aeb5ace1db9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9 2024-11-23T03:21:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9, entries=150, sequenceid=244, filesize=11.9 K 2024-11-23T03:21:33,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 52f48ab0d0f5382239ab1e35c83b29ed in 1297ms, sequenceid=244, compaction requested=true 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:33,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:33,298 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:33,298 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:33,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:33,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,299 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,299 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:33,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,299 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:33,299 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:33,299 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:33,299 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,299 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:33,300 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/13cd6812049a4754911bfec282381665, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=36.0 K 2024-11-23T03:21:33,300 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=100.0 K 2024-11-23T03:21:33,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,300 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:33,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea] 2024-11-23T03:21:33,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,300 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 13cd6812049a4754911bfec282381665, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611 2024-11-23T03:21:33,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d24da886d8a4fe39e996290af85c4a0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611 2024-11-23T03:21:33,300 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fb71422a8fb48dfa23948fa8518feb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732332089227 2024-11-23T03:21:33,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,301 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15a3fceac3be40cb9fcf16ba3b53989a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732332089227 2024-11-23T03:21:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,301 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 665333ca9547478784780b4e4a5967b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:33,301 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17115b99fc94494483f899fea1ca72ea, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,309 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,311 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411231408e816ae8146158e08379dac15640d_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:33,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,316 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#162 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,317 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/3ef08ae468ad4608859a05b4d60bdb80 is 50, key is test_row_0/B:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,318 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411231408e816ae8146158e08379dac15640d_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:33,318 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231408e816ae8146158e08379dac15640d_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees 
family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742020_1196 (size=12697) 2024-11-23T03:21:33,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742021_1197 (size=4469) 2024-11-23T03:21:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,337 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/3ef08ae468ad4608859a05b4d60bdb80 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3ef08ae468ad4608859a05b4d60bdb80 2024-11-23T03:21:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,337 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#163 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,338 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/45b97a78dfe247859e0c5393a8dc0813 is 175, key is test_row_0/A:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:33,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,342 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into 3ef08ae468ad4608859a05b4d60bdb80(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:33,342 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:33,342 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332093298; duration=0sec 2024-11-23T03:21:33,343 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:33,343 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:33,343 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:33,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,344 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:33,344 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:33,344 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,344 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c10a2b184c2749e0b342366920f18a58, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=36.0 K 2024-11-23T03:21:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,345 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c10a2b184c2749e0b342366920f18a58, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732332088611 2024-11-23T03:21:33,346 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 21a523a1e83e459cb582abdebc853d47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732332089227 2024-11-23T03:21:33,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,346 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting af59e2486c6e4df6a3fa6aeb5ace1db9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:33,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38243 is added to blk_1073742022_1198 (size=31651) 2024-11-23T03:21:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,358 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:33,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T03:21:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:33,359 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T03:21:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:33,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:33,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,366 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#164 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:33,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,367 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c760c4ca885f477eb6f2b160cf6db888 is 50, key is test_row_0/C:col10/1732332091389/Put/seqid=0 2024-11-23T03:21:33,368 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/45b97a78dfe247859e0c5393a8dc0813 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813 2024-11-23T03:21:33,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,379 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into 45b97a78dfe247859e0c5393a8dc0813(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:33,379 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:33,379 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332093298; duration=0sec 2024-11-23T03:21:33,379 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:33,379 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:33,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236fee4c63e27044d585d0912896c253a5_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332092005/Put/seqid=0 2024-11-23T03:21:33,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742023_1199 (size=12697) 
2024-11-23T03:21:33,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,400 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c760c4ca885f477eb6f2b160cf6db888 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c760c4ca885f477eb6f2b160cf6db888 2024-11-23T03:21:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,406 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into c760c4ca885f477eb6f2b160cf6db888(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:33,406 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:33,406 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332093298; duration=0sec 2024-11-23T03:21:33,406 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:33,406 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:33,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742024_1200 (size=9814) 2024-11-23T03:21:33,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,820 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236fee4c63e27044d585d0912896c253a5_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236fee4c63e27044d585d0912896c253a5_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,821 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f6d331843e474b998f9666adef30d782, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f6d331843e474b998f9666adef30d782 is 175, key is test_row_0/A:col10/1732332092005/Put/seqid=0 2024-11-23T03:21:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742025_1201 (size=22461) 2024-11-23T03:21:33,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:33,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:34,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332154172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332154173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332154174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332154174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:34,243 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f6d331843e474b998f9666adef30d782 2024-11-23T03:21:34,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/7acdfa5ccb9d46b7a67c248c552b0327 is 50, key is test_row_0/B:col10/1732332092005/Put/seqid=0 2024-11-23T03:21:34,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332154278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332154281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332154281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332154281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742026_1202 (size=9757) 2024-11-23T03:21:34,287 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/7acdfa5ccb9d46b7a67c248c552b0327 2024-11-23T03:21:34,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c3ed1f5e1414569a9479cfb0414b9d3 is 50, key is test_row_0/C:col10/1732332092005/Put/seqid=0 2024-11-23T03:21:34,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742027_1203 (size=9757) 2024-11-23T03:21:34,315 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c3ed1f5e1414569a9479cfb0414b9d3 2024-11-23T03:21:34,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/f6d331843e474b998f9666adef30d782 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782 2024-11-23T03:21:34,335 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782, entries=100, sequenceid=254, filesize=21.9 K 2024-11-23T03:21:34,337 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/7acdfa5ccb9d46b7a67c248c552b0327 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327 2024-11-23T03:21:34,343 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327, entries=100, sequenceid=254, filesize=9.5 K 2024-11-23T03:21:34,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/1c3ed1f5e1414569a9479cfb0414b9d3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3 2024-11-23T03:21:34,350 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3, entries=100, sequenceid=254, filesize=9.5 K 2024-11-23T03:21:34,351 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 52f48ab0d0f5382239ab1e35c83b29ed in 992ms, sequenceid=254, compaction requested=false 2024-11-23T03:21:34,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:34,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:34,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T03:21:34,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-23T03:21:34,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-23T03:21:34,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2170 sec 2024-11-23T03:21:34,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.2220 sec 2024-11-23T03:21:34,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:34,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T03:21:34,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:34,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:34,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:34,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:34,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:34,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:34,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332154492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332154493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332154493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332154496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d7972585a335479b96f61e90096d1d0c_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:34,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742028_1204 (size=12454) 2024-11-23T03:21:34,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332154597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332154598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332154598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332154599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332154801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332154801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332154801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332154801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:34,920 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:34,925 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d7972585a335479b96f61e90096d1d0c_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d7972585a335479b96f61e90096d1d0c_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:34,925 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/aaf642f8af84413383d99f1780e5eca8, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:34,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/aaf642f8af84413383d99f1780e5eca8 is 175, key is test_row_0/A:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742029_1205 (size=31255) 2024-11-23T03:21:35,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332155104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332155105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332155105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332155105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,334 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/aaf642f8af84413383d99f1780e5eca8 2024-11-23T03:21:35,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/37444d11941b41fba85b5105f62977f4 is 50, key is test_row_0/B:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:35,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742030_1206 (size=12301) 2024-11-23T03:21:35,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/37444d11941b41fba85b5105f62977f4 2024-11-23T03:21:35,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/dff15148090441cfbd95a82ae21933a9 is 50, key is test_row_0/C:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742031_1207 (size=12301) 2024-11-23T03:21:35,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332155609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332155609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332155609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:35,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332155613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:35,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/dff15148090441cfbd95a82ae21933a9 2024-11-23T03:21:35,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/aaf642f8af84413383d99f1780e5eca8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8 2024-11-23T03:21:35,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8, entries=150, sequenceid=284, filesize=30.5 K 2024-11-23T03:21:35,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/37444d11941b41fba85b5105f62977f4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4 2024-11-23T03:21:35,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4, entries=150, sequenceid=284, filesize=12.0 K 2024-11-23T03:21:35,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/dff15148090441cfbd95a82ae21933a9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9 2024-11-23T03:21:35,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9, entries=150, sequenceid=284, filesize=12.0 K 2024-11-23T03:21:35,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 52f48ab0d0f5382239ab1e35c83b29ed in 1319ms, sequenceid=284, compaction requested=true 2024-11-23T03:21:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:35,803 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:35,803 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:35,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:35,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:35,805 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:35,805 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=83.4 K 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:35,805 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:35,805 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8] 2024-11-23T03:21:35,805 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3ef08ae468ad4608859a05b4d60bdb80, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=33.9 K 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45b97a78dfe247859e0c5393a8dc0813, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:35,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ef08ae468ad4608859a05b4d60bdb80, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:35,806 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7acdfa5ccb9d46b7a67c248c552b0327, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732332092005 2024-11-23T03:21:35,806 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6d331843e474b998f9666adef30d782, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732332092005 2024-11-23T03:21:35,806 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 37444d11941b41fba85b5105f62977f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:35,807 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting aaf642f8af84413383d99f1780e5eca8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:35,815 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:35,816 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b6cf79c1758d45abb71151e543110fea is 50, key is test_row_0/B:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:35,818 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:35,820 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123a3ceb923678746278a1dd0be74850734_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:35,822 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123a3ceb923678746278a1dd0be74850734_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:35,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742032_1208 (size=12949) 2024-11-23T03:21:35,822 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a3ceb923678746278a1dd0be74850734_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:35,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742033_1209 (size=4469) 2024-11-23T03:21:36,228 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#172 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:36,229 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b04e8d50f6e84ddd8eef8ec0455d7aed is 175, key is test_row_0/A:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:36,230 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/b6cf79c1758d45abb71151e543110fea as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b6cf79c1758d45abb71151e543110fea 2024-11-23T03:21:36,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742034_1210 (size=31903) 2024-11-23T03:21:36,238 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into b6cf79c1758d45abb71151e543110fea(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:36,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:36,238 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332095803; duration=0sec 2024-11-23T03:21:36,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:36,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:36,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:36,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T03:21:36,239 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-23T03:21:36,240 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:36,240 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:36,240 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in 
TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:36,241 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c760c4ca885f477eb6f2b160cf6db888, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=33.9 K 2024-11-23T03:21:36,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:36,241 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c760c4ca885f477eb6f2b160cf6db888, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732332091387 2024-11-23T03:21:36,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-23T03:21:36,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:36,243 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:36,243 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:36,243 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:36,245 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c3ed1f5e1414569a9479cfb0414b9d3, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732332092005 2024-11-23T03:21:36,246 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting dff15148090441cfbd95a82ae21933a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:36,254 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#173 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:36,255 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f3a74cfa7b9c495c907988da09dc8afd is 50, key is test_row_0/C:col10/1732332094482/Put/seqid=0 2024-11-23T03:21:36,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742035_1211 (size=12949) 2024-11-23T03:21:36,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:36,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T03:21:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:36,396 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:21:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:36,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:36,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:36,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:36,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123287b5c563d65499ea005859ff8283d08_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332094494/Put/seqid=0 2024-11-23T03:21:36,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742036_1212 
(size=12454) 2024-11-23T03:21:36,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:36,413 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123287b5c563d65499ea005859ff8283d08_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123287b5c563d65499ea005859ff8283d08_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9444a082d8a54b6fba3de704c6f3b9b1, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9444a082d8a54b6fba3de704c6f3b9b1 is 175, key is test_row_0/A:col10/1732332094494/Put/seqid=0 2024-11-23T03:21:36,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742037_1213 (size=31255) 2024-11-23T03:21:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:36,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:36,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332156638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332156638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,641 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b04e8d50f6e84ddd8eef8ec0455d7aed as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed 2024-11-23T03:21:36,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332156639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332156640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,646 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into b04e8d50f6e84ddd8eef8ec0455d7aed(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:36,646 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:36,646 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332095803; duration=0sec 2024-11-23T03:21:36,646 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:36,646 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:36,668 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/f3a74cfa7b9c495c907988da09dc8afd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f3a74cfa7b9c495c907988da09dc8afd 2024-11-23T03:21:36,673 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into f3a74cfa7b9c495c907988da09dc8afd(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:36,673 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:36,673 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332095804; duration=0sec 2024-11-23T03:21:36,674 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:36,674 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:36,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332156742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332156742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332156743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332156748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,821 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9444a082d8a54b6fba3de704c6f3b9b1 2024-11-23T03:21:36,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/db306424a56c4e46b3a67f026802c58a is 50, key is test_row_0/B:col10/1732332094494/Put/seqid=0 2024-11-23T03:21:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742038_1214 (size=12301) 2024-11-23T03:21:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:36,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332156946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332156946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332156947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:36,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:36,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332156952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,236 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/db306424a56c4e46b3a67f026802c58a 2024-11-23T03:21:37,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c9f95817e10c45daa9f1710ecc0dad0e is 50, key is test_row_0/C:col10/1732332094494/Put/seqid=0 2024-11-23T03:21:37,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332157249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332157251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332157252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332157254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742039_1215 (size=12301) 2024-11-23T03:21:37,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:37,661 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c9f95817e10c45daa9f1710ecc0dad0e 2024-11-23T03:21:37,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9444a082d8a54b6fba3de704c6f3b9b1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1 2024-11-23T03:21:37,671 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1, entries=150, sequenceid=293, filesize=30.5 K 2024-11-23T03:21:37,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/db306424a56c4e46b3a67f026802c58a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a 2024-11-23T03:21:37,676 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a, entries=150, sequenceid=293, filesize=12.0 K 2024-11-23T03:21:37,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/c9f95817e10c45daa9f1710ecc0dad0e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e 2024-11-23T03:21:37,684 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e, entries=150, sequenceid=293, filesize=12.0 K 2024-11-23T03:21:37,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-23T03:21:37,685 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 52f48ab0d0f5382239ab1e35c83b29ed in 1289ms, sequenceid=293, compaction requested=false 2024-11-23T03:21:37,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:37,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:37,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-23T03:21:37,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-23T03:21:37,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-23T03:21:37,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4430 sec 2024-11-23T03:21:37,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.4470 sec 2024-11-23T03:21:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:37,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-23T03:21:37,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:37,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:37,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:37,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:37,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:37,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:37,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332157758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332157758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332157759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332157760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236369a8482a794d3ebd969199e134b8d1_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:37,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742040_1216 (size=17534) 2024-11-23T03:21:37,781 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:37,786 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236369a8482a794d3ebd969199e134b8d1_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236369a8482a794d3ebd969199e134b8d1_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:37,787 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/328b7ed9e8fd4d9685c4ef85b9846d63, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:37,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/328b7ed9e8fd4d9685c4ef85b9846d63 is 175, key is test_row_0/A:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:37,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742041_1217 (size=48639) 2024-11-23T03:21:37,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36254 deadline: 1732332157854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,856 DEBUG [Thread-648 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18283 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:37,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332157862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332157862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:37,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332157863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332158063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332158065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332158077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,194 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/328b7ed9e8fd4d9685c4ef85b9846d63 2024-11-23T03:21:38,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/fb935502e961445488a4c88874635e61 is 50, key is test_row_0/B:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:38,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742042_1218 (size=12301) 2024-11-23T03:21:38,281 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T03:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T03:21:38,347 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-23T03:21:38,348 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-23T03:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T03:21:38,349 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:38,350 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:38,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:38,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332158365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332158366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:38,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332158379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T03:21:38,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T03:21:38,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:38,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:38,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:38,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:38,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:38,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/fb935502e961445488a4c88874635e61 2024-11-23T03:21:38,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/772c84a000044220a77d035edc8a7e9a is 50, key is test_row_0/C:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:38,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742043_1219 (size=12301) 2024-11-23T03:21:38,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/772c84a000044220a77d035edc8a7e9a 2024-11-23T03:21:38,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/328b7ed9e8fd4d9685c4ef85b9846d63 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63 2024-11-23T03:21:38,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63, entries=250, sequenceid=325, filesize=47.5 K 2024-11-23T03:21:38,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/fb935502e961445488a4c88874635e61 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61 2024-11-23T03:21:38,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61, entries=150, sequenceid=325, filesize=12.0 K 2024-11-23T03:21:38,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/772c84a000044220a77d035edc8a7e9a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a 2024-11-23T03:21:38,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a, entries=150, sequenceid=325, filesize=12.0 K 2024-11-23T03:21:38,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 52f48ab0d0f5382239ab1e35c83b29ed in 887ms, sequenceid=325, compaction requested=true 2024-11-23T03:21:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:38,644 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:38,644 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:38,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:38,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:38,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): 
Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:38,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,648 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:38,648 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:38,648 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:38,648 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=109.2 K 2024-11-23T03:21:38,648 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:38,648 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63] 2024-11-23T03:21:38,649 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:38,649 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b04e8d50f6e84ddd8eef8ec0455d7aed, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:38,649 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:38,649 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:38,649 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b6cf79c1758d45abb71151e543110fea, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=36.7 K 2024-11-23T03:21:38,649 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9444a082d8a54b6fba3de704c6f3b9b1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732332094491 2024-11-23T03:21:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,649 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b6cf79c1758d45abb71151e543110fea, keycount=150, bloomtype=ROW, 
size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:38,650 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 328b7ed9e8fd4d9685c4ef85b9846d63, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096635 2024-11-23T03:21:38,650 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting db306424a56c4e46b3a67f026802c58a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732332094491 2024-11-23T03:21:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,651 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fb935502e961445488a4c88874635e61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096638 2024-11-23T03:21:38,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T03:21:38,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:38,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T03:21:38,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:38,656 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-23T03:21:38,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:38,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:38,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,660 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,668 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#181 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:38,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,669 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/88d85af94b3d4a0593e5a62a98264ee6 is 50, key is test_row_0/B:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:38,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,670 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123e0e3eecf0ccc472ebe2888cbf40950dc_52f48ab0d0f5382239ab1e35c83b29ed 
store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:38,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c61ac326788a4a629e45dfd52eb430ad_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_1/A:col10/1732332097758/Put/seqid=0 2024-11-23T03:21:38,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,678 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123e0e3eecf0ccc472ebe2888cbf40950dc_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:38,678 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e0e3eecf0ccc472ebe2888cbf40950dc_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:38,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742045_1221 (size=4469) 2024-11-23T03:21:38,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742044_1220 (size=13051) 2024-11-23T03:21:38,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,692 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#180 average throughput is 0.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:38,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,693 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/d9e753d4b59a44f6a1fc09de1ad7bf37 is 175, key is test_row_0/A:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742046_1222 (size=9914) 2024-11-23T03:21:38,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,712 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c61ac326788a4a629e45dfd52eb430ad_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c61ac326788a4a629e45dfd52eb430ad_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:38,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,714 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e4958576ee364dd181251ae963a053af, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:38,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e4958576ee364dd181251ae963a053af is 175, key is test_row_1/A:col10/1732332097758/Put/seqid=0 2024-11-23T03:21:38,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742047_1223 (size=32005) 2024-11-23T03:21:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742048_1224 (size=22561) 2024-11-23T03:21:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,727 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=6.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e4958576ee364dd181251ae963a053af 2024-11-23T03:21:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,729 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/d9e753d4b59a44f6a1fc09de1ad7bf37 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37 2024-11-23T03:21:38,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0b0d9e0cd29148c7a2c0885c39f19b0f is 50, key is test_row_1/B:col10/1732332097758/Put/seqid=0 2024-11-23T03:21:38,742 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into d9e753d4b59a44f6a1fc09de1ad7bf37(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:38,742 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:38,742 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332098644; duration=0sec 2024-11-23T03:21:38,742 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:38,742 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:38,742 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:38,743 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:38,743 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:38,743 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:38,743 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f3a74cfa7b9c495c907988da09dc8afd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=36.7 K 2024-11-23T03:21:38,744 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3a74cfa7b9c495c907988da09dc8afd, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732332094166 2024-11-23T03:21:38,744 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9f95817e10c45daa9f1710ecc0dad0e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732332094491 2024-11-23T03:21:38,745 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 772c84a000044220a77d035edc8a7e9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096638 2024-11-23T03:21:38,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742049_1225 (size=9857) 2024-11-23T03:21:38,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,756 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0b0d9e0cd29148c7a2c0885c39f19b0f 2024-11-23T03:21:38,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,775 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#184 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:38,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,776 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/110f57f6097f4afaab33051f79abe7ae is 50, key is test_row_0/C:col10/1732332097755/Put/seqid=0 2024-11-23T03:21:38,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,778 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,782 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,786 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/aae032a1033544ff8e1d52c98d078916 is 50, key is test_row_1/C:col10/1732332097758/Put/seqid=0 2024-11-23T03:21:38,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742050_1226 (size=13051) 2024-11-23T03:21:38,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,819 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/110f57f6097f4afaab33051f79abe7ae as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/110f57f6097f4afaab33051f79abe7ae 2024-11-23T03:21:38,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,826 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into 110f57f6097f4afaab33051f79abe7ae(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:38,826 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:38,826 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332098645; duration=0sec 2024-11-23T03:21:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,827 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:38,827 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742051_1227 (size=9857) 2024-11-23T03:21:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T03:21:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:38,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
as already flushing 2024-11-23T03:21:38,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T03:21:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T03:21:38,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T03:21:38,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332158921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
2024-11-23T03:21:38,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T03:21:38,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332158925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
2024-11-23T03:21:38,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T03:21:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332158927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
2024-11-23T03:21:38,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T03:21:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332158927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
2024-11-23T03:21:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-23T03:21:39,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332159028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332159030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332159031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332159031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,096 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/88d85af94b3d4a0593e5a62a98264ee6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/88d85af94b3d4a0593e5a62a98264ee6 2024-11-23T03:21:39,103 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into 88d85af94b3d4a0593e5a62a98264ee6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:39,103 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:39,103 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332098644; duration=0sec 2024-11-23T03:21:39,103 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:39,103 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:39,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332159231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332159232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332159233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332159233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,245 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/aae032a1033544ff8e1d52c98d078916 2024-11-23T03:21:39,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/e4958576ee364dd181251ae963a053af as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af 2024-11-23T03:21:39,256 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af, entries=100, sequenceid=331, filesize=22.0 K 2024-11-23T03:21:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/0b0d9e0cd29148c7a2c0885c39f19b0f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f 2024-11-23T03:21:39,262 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f, entries=100, sequenceid=331, filesize=9.6 K 2024-11-23T03:21:39,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/aae032a1033544ff8e1d52c98d078916 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916 2024-11-23T03:21:39,267 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916, entries=100, sequenceid=331, filesize=9.6 K 2024-11-23T03:21:39,267 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=187.85 KB/192360 for 52f48ab0d0f5382239ab1e35c83b29ed in 611ms, sequenceid=331, compaction requested=false 2024-11-23T03:21:39,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:39,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-23T03:21:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-23T03:21:39,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-23T03:21:39,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 919 msec 2024-11-23T03:21:39,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 922 msec 2024-11-23T03:21:39,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T03:21:39,453 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-23T03:21:39,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-23T03:21:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:39,455 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:39,456 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:39,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:39,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:39,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:39,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332159536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:39,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332159537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332159539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332159551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:39,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236e67c5f581894272842c6d25b37499b0_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:39,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742052_1228 (size=12454) 2024-11-23T03:21:39,573 DEBUG [Thread-661 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:61411 2024-11-23T03:21:39,573 DEBUG [Thread-659 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:61411 2024-11-23T03:21:39,573 DEBUG [Thread-661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:39,573 DEBUG [Thread-659 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:39,575 DEBUG [Thread-663 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:61411 2024-11-23T03:21:39,575 DEBUG [Thread-663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:39,577 DEBUG [Thread-665 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:61411 2024-11-23T03:21:39,577 DEBUG [Thread-665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:39,607 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:39,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332159640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332159644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332159654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:39,760 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:39,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:39,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332159843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332159846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332159856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:39,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:39,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:39,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:39,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:39,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:39,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:39,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:39,962 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:39,966 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236e67c5f581894272842c6d25b37499b0_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236e67c5f581894272842c6d25b37499b0_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:39,967 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b26ba9a7482b47c6878ba73bc638fbaa, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:39,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b26ba9a7482b47c6878ba73bc638fbaa is 175, key is test_row_0/A:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:39,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742053_1229 (size=31255) 2024-11-23T03:21:40,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332160044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:40,065 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332160145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332160148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332160158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:40,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,371 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=367, memsize=67.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b26ba9a7482b47c6878ba73bc638fbaa 2024-11-23T03:21:40,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/51011d3099494a76a6fa603c92245749 is 50, key is test_row_0/B:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:40,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742054_1230 (size=12301) 2024-11-23T03:21:40,524 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:40,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732332160649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36190 deadline: 1732332160651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:40,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36184 deadline: 1732332160662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/51011d3099494a76a6fa603c92245749 2024-11-23T03:21:40,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/8f88e9ed33dc4c298d4bba23cd82a0eb is 50, key is test_row_0/C:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:40,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742055_1231 (size=12301) 2024-11-23T03:21:40,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:40,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:40,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:40,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:40,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:40,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:40,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:41,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:41,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732332161047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:41,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:41,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:41,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:41,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. as already flushing 2024-11-23T03:21:41,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:41,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:41,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:41,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:41,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/8f88e9ed33dc4c298d4bba23cd82a0eb 2024-11-23T03:21:41,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/b26ba9a7482b47c6878ba73bc638fbaa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa 2024-11-23T03:21:41,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa, entries=150, sequenceid=367, filesize=30.5 K 2024-11-23T03:21:41,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/51011d3099494a76a6fa603c92245749 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749 2024-11-23T03:21:41,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749, entries=150, sequenceid=367, filesize=12.0 K 2024-11-23T03:21:41,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/8f88e9ed33dc4c298d4bba23cd82a0eb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb 2024-11-23T03:21:41,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb, entries=150, sequenceid=367, filesize=12.0 K 2024-11-23T03:21:41,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=6.71 KB/6870 for 52f48ab0d0f5382239ab1e35c83b29ed in 1673ms, sequenceid=367, compaction requested=true 2024-11-23T03:21:41,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
52f48ab0d0f5382239ab1e35c83b29ed:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f48ab0d0f5382239ab1e35c83b29ed:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:41,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/A is initiating minor compaction (all files) 2024-11-23T03:21:41,211 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/B is initiating minor compaction (all files) 2024-11-23T03:21:41,212 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/A in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:41,212 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/B in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:41,212 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=83.8 K 2024-11-23T03:21:41,212 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/88d85af94b3d4a0593e5a62a98264ee6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=34.4 K 2024-11-23T03:21:41,212 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:41,212 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa] 2024-11-23T03:21:41,212 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 88d85af94b3d4a0593e5a62a98264ee6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096638 2024-11-23T03:21:41,212 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9e753d4b59a44f6a1fc09de1ad7bf37, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096638 2024-11-23T03:21:41,212 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b0d9e0cd29148c7a2c0885c39f19b0f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332097757 2024-11-23T03:21:41,212 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4958576ee364dd181251ae963a053af, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332097757 2024-11-23T03:21:41,213 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 51011d3099494a76a6fa603c92245749, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732332098924 2024-11-23T03:21:41,213 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b26ba9a7482b47c6878ba73bc638fbaa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732332098924 2024-11-23T03:21:41,219 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:41,220 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:41,221 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/90f4d2303afc4ffa975c74959c4e35db is 50, key is test_row_0/B:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:41,222 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112384f89d6634e4455292febd9c6f5ccd54_52f48ab0d0f5382239ab1e35c83b29ed store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:41,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742056_1232 (size=13153) 2024-11-23T03:21:41,242 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112384f89d6634e4455292febd9c6f5ccd54_52f48ab0d0f5382239ab1e35c83b29ed, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:41,242 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112384f89d6634e4455292febd9c6f5ccd54_52f48ab0d0f5382239ab1e35c83b29ed because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:41,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742057_1233 (size=4469) 2024-11-23T03:21:41,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:41,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T03:21:41,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:41,287 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:41,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e8a74b26db884213b10621b4503ba927_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_1/A:col10/1732332099539/Put/seqid=0 2024-11-23T03:21:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742058_1234 (size=7374) 2024-11-23T03:21:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:41,631 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/90f4d2303afc4ffa975c74959c4e35db as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/90f4d2303afc4ffa975c74959c4e35db 2024-11-23T03:21:41,635 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/B of 52f48ab0d0f5382239ab1e35c83b29ed into 90f4d2303afc4ffa975c74959c4e35db(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:41,635 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:41,635 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/B, priority=13, startTime=1732332101211; duration=0sec 2024-11-23T03:21:41,635 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:41,635 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:B 2024-11-23T03:21:41,635 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:41,636 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:41,636 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 52f48ab0d0f5382239ab1e35c83b29ed/C is initiating minor compaction (all files) 2024-11-23T03:21:41,636 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 52f48ab0d0f5382239ab1e35c83b29ed/C in TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:41,636 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/110f57f6097f4afaab33051f79abe7ae, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp, totalSize=34.4 K 2024-11-23T03:21:41,637 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 110f57f6097f4afaab33051f79abe7ae, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732332096638 2024-11-23T03:21:41,637 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting aae032a1033544ff8e1d52c98d078916, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332097757 2024-11-23T03:21:41,637 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f88e9ed33dc4c298d4bba23cd82a0eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732332098924 2024-11-23T03:21:41,644 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
52f48ab0d0f5382239ab1e35c83b29ed#C#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:41,645 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/13efc076a6a14676934d2d3fdbdd0ccf is 50, key is test_row_0/C:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:41,647 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f48ab0d0f5382239ab1e35c83b29ed#A#compaction#190 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:41,648 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/6366c567383c4f2c8ede53b165b8e7bb is 175, key is test_row_0/A:col10/1732332099536/Put/seqid=0 2024-11-23T03:21:41,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742059_1235 (size=13153) 2024-11-23T03:21:41,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742060_1236 (size=32107) 2024-11-23T03:21:41,660 DEBUG [Thread-652 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:61411 2024-11-23T03:21:41,660 DEBUG [Thread-654 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:61411 2024-11-23T03:21:41,660 DEBUG [Thread-654 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:41,660 DEBUG [Thread-652 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:41,673 DEBUG [Thread-656 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:61411 2024-11-23T03:21:41,673 DEBUG [Thread-656 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:41,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:41,703 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e8a74b26db884213b10621b4503ba927_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e8a74b26db884213b10621b4503ba927_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:41,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/db9bb0fcee0140f08edf14aea2096c84, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/db9bb0fcee0140f08edf14aea2096c84 is 175, key is test_row_1/A:col10/1732332099539/Put/seqid=0 2024-11-23T03:21:41,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742061_1237 (size=13865) 2024-11-23T03:21:42,053 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/13efc076a6a14676934d2d3fdbdd0ccf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/13efc076a6a14676934d2d3fdbdd0ccf 2024-11-23T03:21:42,057 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/C of 52f48ab0d0f5382239ab1e35c83b29ed into 13efc076a6a14676934d2d3fdbdd0ccf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:42,057 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:42,057 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/C, priority=13, startTime=1732332101211; duration=0sec 2024-11-23T03:21:42,058 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:42,058 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:C 2024-11-23T03:21:42,061 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/6366c567383c4f2c8ede53b165b8e7bb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6366c567383c4f2c8ede53b165b8e7bb 2024-11-23T03:21:42,065 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 52f48ab0d0f5382239ab1e35c83b29ed/A of 52f48ab0d0f5382239ab1e35c83b29ed into 6366c567383c4f2c8ede53b165b8e7bb(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:42,065 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:42,065 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed., storeName=52f48ab0d0f5382239ab1e35c83b29ed/A, priority=13, startTime=1732332101210; duration=0sec 2024-11-23T03:21:42,065 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:42,065 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f48ab0d0f5382239ab1e35c83b29ed:A 2024-11-23T03:21:42,108 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=371, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/db9bb0fcee0140f08edf14aea2096c84 2024-11-23T03:21:42,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/8971b029d792488bb7a555a361ab6fb3 is 50, key is test_row_1/B:col10/1732332099539/Put/seqid=0 2024-11-23T03:21:42,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742062_1238 (size=7415) 2024-11-23T03:21:42,519 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/8971b029d792488bb7a555a361ab6fb3 2024-11-23T03:21:42,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5b92f4230bfa4fd5b82211ccedaefaa9 is 50, key is test_row_1/C:col10/1732332099539/Put/seqid=0 2024-11-23T03:21:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742063_1239 (size=7415) 2024-11-23T03:21:42,930 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5b92f4230bfa4fd5b82211ccedaefaa9 2024-11-23T03:21:42,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/db9bb0fcee0140f08edf14aea2096c84 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/db9bb0fcee0140f08edf14aea2096c84 2024-11-23T03:21:42,938 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/db9bb0fcee0140f08edf14aea2096c84, entries=50, sequenceid=371, filesize=13.5 K 2024-11-23T03:21:42,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/8971b029d792488bb7a555a361ab6fb3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/8971b029d792488bb7a555a361ab6fb3 2024-11-23T03:21:42,943 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/8971b029d792488bb7a555a361ab6fb3, entries=50, sequenceid=371, filesize=7.2 K 2024-11-23T03:21:42,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/5b92f4230bfa4fd5b82211ccedaefaa9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5b92f4230bfa4fd5b82211ccedaefaa9 2024-11-23T03:21:42,947 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5b92f4230bfa4fd5b82211ccedaefaa9, entries=50, sequenceid=371, filesize=7.2 K 2024-11-23T03:21:42,948 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=20.13 KB/20610 for 52f48ab0d0f5382239ab1e35c83b29ed in 1661ms, sequenceid=371, compaction requested=false 2024-11-23T03:21:42,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:42,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:42,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-23T03:21:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-23T03:21:42,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-23T03:21:42,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4930 sec 2024-11-23T03:21:42,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 3.4970 sec 2024-11-23T03:21:43,054 DEBUG [Thread-650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:61411 2024-11-23T03:21:43,054 DEBUG [Thread-650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:43,192 DEBUG [master/0d51875c74df:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 083dc89e8b2b1c4aa6851e27c52fd159 changed from -1.0 to 0.0, refreshing cache 2024-11-23T03:21:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T03:21:43,560 INFO [Thread-658 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-23T03:21:47,910 DEBUG [Thread-648 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7362d978 to 127.0.0.1:61411 2024-11-23T03:21:47,910 DEBUG [Thread-648 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6852 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6754 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2932 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8796 rows 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2909 2024-11-23T03:21:47,911 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8726 rows 2024-11-23T03:21:47,911 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:21:47,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04506927 to 127.0.0.1:61411 2024-11-23T03:21:47,911 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:21:47,913 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T03:21:47,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T03:21:47,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:47,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:47,917 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332107917"}]},"ts":"1732332107917"} 2024-11-23T03:21:47,919 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T03:21:47,921 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T03:21:47,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:21:47,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, UNASSIGN}] 2024-11-23T03:21:47,924 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, UNASSIGN 2024-11-23T03:21:47,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:47,925 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:21:47,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:48,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:48,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:48,077 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:48,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:21:48,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 52f48ab0d0f5382239ab1e35c83b29ed, disabling compactions & flushes 2024-11-23T03:21:48,077 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:48,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:48,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. after waiting 0 ms 2024-11-23T03:21:48,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 
2024-11-23T03:21:48,077 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing 52f48ab0d0f5382239ab1e35c83b29ed 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=A 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=B 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 52f48ab0d0f5382239ab1e35c83b29ed, store=C 2024-11-23T03:21:48,078 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:48,085 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232612a3a3df954c5cab51cab3fc11eb89_52f48ab0d0f5382239ab1e35c83b29ed is 50, key is test_row_0/A:col10/1732332107909/Put/seqid=0 2024-11-23T03:21:48,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742064_1240 (size=12454) 2024-11-23T03:21:48,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:48,489 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:48,494 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232612a3a3df954c5cab51cab3fc11eb89_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232612a3a3df954c5cab51cab3fc11eb89_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:48,495 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9d6875fd51b94d00821a289b0a320a98, store: [table=TestAcidGuarantees family=A region=52f48ab0d0f5382239ab1e35c83b29ed] 2024-11-23T03:21:48,496 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9d6875fd51b94d00821a289b0a320a98 is 175, key is test_row_0/A:col10/1732332107909/Put/seqid=0 2024-11-23T03:21:48,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742065_1241 (size=31255) 2024-11-23T03:21:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:48,901 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9d6875fd51b94d00821a289b0a320a98 2024-11-23T03:21:48,909 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/c80c766d98ea41f2ad2e88ba01dc3e4f is 50, key is test_row_0/B:col10/1732332107909/Put/seqid=0 2024-11-23T03:21:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742066_1242 (size=12301) 2024-11-23T03:21:49,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:49,314 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/c80c766d98ea41f2ad2e88ba01dc3e4f 2024-11-23T03:21:49,321 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/7bc1bb938ca7483887d0ac04e85bf896 is 50, key is test_row_0/C:col10/1732332107909/Put/seqid=0 2024-11-23T03:21:49,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742067_1243 (size=12301) 2024-11-23T03:21:49,726 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=382 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/7bc1bb938ca7483887d0ac04e85bf896 2024-11-23T03:21:49,731 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/A/9d6875fd51b94d00821a289b0a320a98 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9d6875fd51b94d00821a289b0a320a98 2024-11-23T03:21:49,735 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9d6875fd51b94d00821a289b0a320a98, entries=150, sequenceid=382, filesize=30.5 K 2024-11-23T03:21:49,736 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/B/c80c766d98ea41f2ad2e88ba01dc3e4f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/c80c766d98ea41f2ad2e88ba01dc3e4f 2024-11-23T03:21:49,740 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/c80c766d98ea41f2ad2e88ba01dc3e4f, entries=150, sequenceid=382, filesize=12.0 K 2024-11-23T03:21:49,740 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/.tmp/C/7bc1bb938ca7483887d0ac04e85bf896 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/7bc1bb938ca7483887d0ac04e85bf896 2024-11-23T03:21:49,743 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/7bc1bb938ca7483887d0ac04e85bf896, entries=150, sequenceid=382, filesize=12.0 K 2024-11-23T03:21:49,744 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 52f48ab0d0f5382239ab1e35c83b29ed in 1667ms, sequenceid=382, compaction requested=true 2024-11-23T03:21:49,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa] to archive 2024-11-23T03:21:49,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:21:49,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5b3c31e7d05f43dd94af6e43136ea960 2024-11-23T03:21:49,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/961f21045b354ebc9ad7b2482e0edc54 2024-11-23T03:21:49,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/da6d5824f4d34d7cb3b156ba2dde985b 2024-11-23T03:21:49,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d6c673468cba437e862c21a26bdd6072 2024-11-23T03:21:49,752 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e96314f48d464c27ae1ac1c8bcad2ae0 2024-11-23T03:21:49,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/5d9b40990bae42ac9c9ac86dcae636aa 2024-11-23T03:21:49,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/27bc3cee21694fbd9b714e879dd2657e 2024-11-23T03:21:49,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/227601f6beb44e1db3f37b019f7d4891 2024-11-23T03:21:49,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/a3a74f8d64364cc09a04612aa4e849f2 2024-11-23T03:21:49,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/309ac007588048f9a1427dedfc23bf79 2024-11-23T03:21:49,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9a6565ae7ed2474aabfd500756fa33ec 2024-11-23T03:21:49,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/7074f692e15c41758febdebd457626d4 2024-11-23T03:21:49,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6d24da886d8a4fe39e996290af85c4a0 2024-11-23T03:21:49,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f71b3cc8a35446a6a023d777c3f8917f 2024-11-23T03:21:49,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/15a3fceac3be40cb9fcf16ba3b53989a 2024-11-23T03:21:49,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/17115b99fc94494483f899fea1ca72ea 2024-11-23T03:21:49,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/45b97a78dfe247859e0c5393a8dc0813 2024-11-23T03:21:49,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/f6d331843e474b998f9666adef30d782 2024-11-23T03:21:49,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b04e8d50f6e84ddd8eef8ec0455d7aed 2024-11-23T03:21:49,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/aaf642f8af84413383d99f1780e5eca8 2024-11-23T03:21:49,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9444a082d8a54b6fba3de704c6f3b9b1 2024-11-23T03:21:49,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/328b7ed9e8fd4d9685c4ef85b9846d63 2024-11-23T03:21:49,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/d9e753d4b59a44f6a1fc09de1ad7bf37 2024-11-23T03:21:49,771 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/e4958576ee364dd181251ae963a053af 2024-11-23T03:21:49,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/b26ba9a7482b47c6878ba73bc638fbaa 2024-11-23T03:21:49,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b1e9d8fb17cb4bd1b29c839f8696c1b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b35a673f8a94005855694bbc89eb6cf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/dbd7a27dc2224ad58fcf2636abfefd59, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/13cd6812049a4754911bfec282381665, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3ef08ae468ad4608859a05b4d60bdb80, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b6cf79c1758d45abb71151e543110fea, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/88d85af94b3d4a0593e5a62a98264ee6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749] to archive 2024-11-23T03:21:49,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T03:21:49,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/eccc971fd1624304bf0ce8db38b09053 2024-11-23T03:21:49,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/9f1f48211ac94dfd8a452b0df78a68ad 2024-11-23T03:21:49,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b1e9d8fb17cb4bd1b29c839f8696c1b1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b1e9d8fb17cb4bd1b29c839f8696c1b1 2024-11-23T03:21:49,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0757d071eb7e4fd6be53ed614387123b 2024-11-23T03:21:49,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b264d2f3c5a24bbeb318c3e8ccb7257f 2024-11-23T03:21:49,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b35a673f8a94005855694bbc89eb6cf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b35a673f8a94005855694bbc89eb6cf 2024-11-23T03:21:49,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/297ebe6726cb4beabf0f79a09a8224c5 2024-11-23T03:21:49,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/afa877c0de964d74b5dc86b5f4e77d40 2024-11-23T03:21:49,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/94451afe46214eee9880f0a4ef133185 2024-11-23T03:21:49,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/dbd7a27dc2224ad58fcf2636abfefd59 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/dbd7a27dc2224ad58fcf2636abfefd59 2024-11-23T03:21:49,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b246801bae7243a7b78feea845ef76d7 2024-11-23T03:21:49,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/30dcc965d2a64f1d8875e7e5d08d965d 2024-11-23T03:21:49,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/13cd6812049a4754911bfec282381665 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/13cd6812049a4754911bfec282381665 2024-11-23T03:21:49,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3cf7164106e2410bb7a4f9771a6ab39a 2024-11-23T03:21:49,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/4fb71422a8fb48dfa23948fa8518feb0 2024-11-23T03:21:49,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3ef08ae468ad4608859a05b4d60bdb80 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/3ef08ae468ad4608859a05b4d60bdb80 2024-11-23T03:21:49,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/665333ca9547478784780b4e4a5967b3 2024-11-23T03:21:49,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/7acdfa5ccb9d46b7a67c248c552b0327 2024-11-23T03:21:49,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b6cf79c1758d45abb71151e543110fea to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/b6cf79c1758d45abb71151e543110fea 2024-11-23T03:21:49,803 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/37444d11941b41fba85b5105f62977f4 2024-11-23T03:21:49,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/db306424a56c4e46b3a67f026802c58a 2024-11-23T03:21:49,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/88d85af94b3d4a0593e5a62a98264ee6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/88d85af94b3d4a0593e5a62a98264ee6 2024-11-23T03:21:49,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/fb935502e961445488a4c88874635e61 2024-11-23T03:21:49,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/0b0d9e0cd29148c7a2c0885c39f19b0f 2024-11-23T03:21:49,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/51011d3099494a76a6fa603c92245749 2024-11-23T03:21:49,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f23e258e7a2f47c6a61977b58baa95b1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/620316b3248244f1836189af447c3fd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/66a7d349f05143a5a311c65fc531608b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c10a2b184c2749e0b342366920f18a58, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c760c4ca885f477eb6f2b160cf6db888, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f3a74cfa7b9c495c907988da09dc8afd, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/110f57f6097f4afaab33051f79abe7ae, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb] to archive 2024-11-23T03:21:49,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:21:49,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a 2024-11-23T03:21:49,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c322e0ae61c347afab01f581f92ae966 2024-11-23T03:21:49,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f23e258e7a2f47c6a61977b58baa95b1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f23e258e7a2f47c6a61977b58baa95b1 2024-11-23T03:21:49,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c8b24a3491c4991a4cb01577bea73f0 2024-11-23T03:21:49,819 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/daec93b6af77403180fe477cd07d79fe 2024-11-23T03:21:49,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/620316b3248244f1836189af447c3fd4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/620316b3248244f1836189af447c3fd4 2024-11-23T03:21:49,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/822843c48f4346328b4b3773590fae8b 2024-11-23T03:21:49,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/10b7483ed0ac43a2822cea64394ce81a 2024-11-23T03:21:49,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1305eb4c54ff48229091943f43386338 2024-11-23T03:21:49,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/66a7d349f05143a5a311c65fc531608b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/66a7d349f05143a5a311c65fc531608b 2024-11-23T03:21:49,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f33ea53725f14c60928139ce5817f76c 2024-11-23T03:21:49,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/14160160a9ab4e39a73587464b544178 2024-11-23T03:21:49,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c10a2b184c2749e0b342366920f18a58 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c10a2b184c2749e0b342366920f18a58 2024-11-23T03:21:49,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5644ecf3d78d41f18a8bf8c34b47f821 2024-11-23T03:21:49,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/21a523a1e83e459cb582abdebc853d47 2024-11-23T03:21:49,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c760c4ca885f477eb6f2b160cf6db888 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c760c4ca885f477eb6f2b160cf6db888 2024-11-23T03:21:49,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/af59e2486c6e4df6a3fa6aeb5ace1db9 2024-11-23T03:21:49,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/1c3ed1f5e1414569a9479cfb0414b9d3 2024-11-23T03:21:49,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f3a74cfa7b9c495c907988da09dc8afd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/f3a74cfa7b9c495c907988da09dc8afd 2024-11-23T03:21:49,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/dff15148090441cfbd95a82ae21933a9 2024-11-23T03:21:49,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/c9f95817e10c45daa9f1710ecc0dad0e 2024-11-23T03:21:49,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/110f57f6097f4afaab33051f79abe7ae to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/110f57f6097f4afaab33051f79abe7ae 2024-11-23T03:21:49,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/772c84a000044220a77d035edc8a7e9a 2024-11-23T03:21:49,841 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/aae032a1033544ff8e1d52c98d078916 2024-11-23T03:21:49,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/8f88e9ed33dc4c298d4bba23cd82a0eb 2024-11-23T03:21:49,848 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits/385.seqid, newMaxSeqId=385, maxSeqId=4 2024-11-23T03:21:49,849 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed. 2024-11-23T03:21:49,849 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 52f48ab0d0f5382239ab1e35c83b29ed: 2024-11-23T03:21:49,850 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:49,851 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=52f48ab0d0f5382239ab1e35c83b29ed, regionState=CLOSED 2024-11-23T03:21:49,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-23T03:21:49,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 52f48ab0d0f5382239ab1e35c83b29ed, server=0d51875c74df,34141,1732332039937 in 1.9270 sec 2024-11-23T03:21:49,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-23T03:21:49,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=52f48ab0d0f5382239ab1e35c83b29ed, UNASSIGN in 1.9300 sec 2024-11-23T03:21:49,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-23T03:21:49,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9330 sec 2024-11-23T03:21:49,857 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332109857"}]},"ts":"1732332109857"} 2024-11-23T03:21:49,858 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:21:49,860 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:21:49,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9470 sec 2024-11-23T03:21:50,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T03:21:50,021 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-23T03:21:50,022 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:21:50,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,024 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-23T03:21:50,026 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,027 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,033 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits] 2024-11-23T03:21:50,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6366c567383c4f2c8ede53b165b8e7bb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/6366c567383c4f2c8ede53b165b8e7bb 2024-11-23T03:21:50,051 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9d6875fd51b94d00821a289b0a320a98 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/9d6875fd51b94d00821a289b0a320a98 2024-11-23T03:21:50,053 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/db9bb0fcee0140f08edf14aea2096c84 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/A/db9bb0fcee0140f08edf14aea2096c84 2024-11-23T03:21:50,058 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/8971b029d792488bb7a555a361ab6fb3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/8971b029d792488bb7a555a361ab6fb3 2024-11-23T03:21:50,059 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/90f4d2303afc4ffa975c74959c4e35db to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/90f4d2303afc4ffa975c74959c4e35db 2024-11-23T03:21:50,067 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/c80c766d98ea41f2ad2e88ba01dc3e4f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/B/c80c766d98ea41f2ad2e88ba01dc3e4f 2024-11-23T03:21:50,070 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/13efc076a6a14676934d2d3fdbdd0ccf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/13efc076a6a14676934d2d3fdbdd0ccf 2024-11-23T03:21:50,072 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5b92f4230bfa4fd5b82211ccedaefaa9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/5b92f4230bfa4fd5b82211ccedaefaa9 2024-11-23T03:21:50,074 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/7bc1bb938ca7483887d0ac04e85bf896 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/7bc1bb938ca7483887d0ac04e85bf896 2024-11-23T03:21:50,077 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits/385.seqid to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/recovered.edits/385.seqid 2024-11-23T03:21:50,078 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,078 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:21:50,079 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:21:50,080 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T03:21:50,086 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230fb5567849f649eaacd6649ae90e101f_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230fb5567849f649eaacd6649ae90e101f_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,087 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231937d31b41f945b289519a834760ad64_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231937d31b41f945b289519a834760ad64_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,089 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232612a3a3df954c5cab51cab3fc11eb89_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232612a3a3df954c5cab51cab3fc11eb89_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,091 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123287b5c563d65499ea005859ff8283d08_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123287b5c563d65499ea005859ff8283d08_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,093 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232947793eba174adfac5bc6b8c16ad391_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232947793eba174adfac5bc6b8c16ad391_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,094 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a3f1ee3a7e14e759a1bb0ade99ba2d2_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a3f1ee3a7e14e759a1bb0ade99ba2d2_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,097 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b329d4f3b9246a192a94e3ebdc141fa_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b329d4f3b9246a192a94e3ebdc141fa_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,098 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236369a8482a794d3ebd969199e134b8d1_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236369a8482a794d3ebd969199e134b8d1_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,100 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b0b85616a6d4decadf387e28e6cfdf2_52f48ab0d0f5382239ab1e35c83b29ed to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b0b85616a6d4decadf387e28e6cfdf2_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,102 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236e67c5f581894272842c6d25b37499b0_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236e67c5f581894272842c6d25b37499b0_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,103 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236fee4c63e27044d585d0912896c253a5_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236fee4c63e27044d585d0912896c253a5_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,105 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123999766c6cd774580bc6d9c6e37143558_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123999766c6cd774580bc6d9c6e37143558_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,107 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c80c661b4a845629b47654177715244_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c80c661b4a845629b47654177715244_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,109 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a7f681605d3044e891eadc596673c72f_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a7f681605d3044e891eadc596673c72f_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,110 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c06935d8b269463bbbe2460dd5ac1715_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c06935d8b269463bbbe2460dd5ac1715_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,112 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c61ac326788a4a629e45dfd52eb430ad_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c61ac326788a4a629e45dfd52eb430ad_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,114 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb60d114a9dd4dda878704f3789a947c_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb60d114a9dd4dda878704f3789a947c_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,115 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d7972585a335479b96f61e90096d1d0c_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d7972585a335479b96f61e90096d1d0c_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,117 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e8a74b26db884213b10621b4503ba927_52f48ab0d0f5382239ab1e35c83b29ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e8a74b26db884213b10621b4503ba927_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,118 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f63ce3b78d5047ef8eb6d0b390d29953_52f48ab0d0f5382239ab1e35c83b29ed to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f63ce3b78d5047ef8eb6d0b390d29953_52f48ab0d0f5382239ab1e35c83b29ed 2024-11-23T03:21:50,121 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:21:50,123 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-23T03:21:50,132 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:21:50,135 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T03:21:50,140 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,140 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:21:50,140 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332110140"}]},"ts":"9223372036854775807"} 2024-11-23T03:21:50,142 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:21:50,143 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 52f48ab0d0f5382239ab1e35c83b29ed, NAME => 'TestAcidGuarantees,,1732332076461.52f48ab0d0f5382239ab1e35c83b29ed.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:21:50,143 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
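Every "Archived from FileableStoreFile/FileablePath ... to ..." entry above follows the same pattern: the file keeps its relative path but is re-rooted under archive/ instead of data/, so the store files remain available under archive/ after the region is closed or the table is dropped. Below is a minimal sketch of that path mapping using only the Hadoop FileSystem API; the real move is performed server-side by backup.HFileArchiver, and while the root URI, table, region and file name are copied from the log, everything else (class name, configuration source) is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      public static void main(String[] args) throws IOException {
        // Filesystem root used by this test run, as reported in the log above.
        Path root = new Path("hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417");
        // Relative location of one compacted store file listed in the archiving log.
        String rel = "data/default/TestAcidGuarantees/52f48ab0d0f5382239ab1e35c83b29ed/C/fc8ded802a5b4c06bef99e0fd4c6828a";

        Configuration conf = new Configuration();
        FileSystem fs = root.getFileSystem(conf);

        Path live = new Path(root, rel);
        Path archived = new Path(root, "archive/" + rel);   // same relative path, rooted under archive/

        fs.mkdirs(archived.getParent());   // archive directories are created on demand
        fs.rename(live, archived);         // relocate rather than delete
      }
    }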
2024-11-23T03:21:50,143 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332110143"}]},"ts":"9223372036854775807"} 2024-11-23T03:21:50,145 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:21:50,148 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 126 msec 2024-11-23T03:21:50,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-23T03:21:50,326 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-23T03:21:50,337 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=237 (was 238), OpenFileDescriptor=452 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=304 (was 246) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4801 (was 4524) - AvailableMemoryMB LEAK? - 2024-11-23T03:21:50,348 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=304, ProcessCount=11, AvailableMemoryMB=4798 2024-11-23T03:21:50,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
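The DISABLE (procId 61) and DELETE (procId 65) operations reported as completed above are driven by ordinary client-side Admin calls; the master translates them into the DisableTableProcedure and DeleteTableProcedure traced in the log. A minimal sketch, assuming the standard HBase 2.x client API and a cluster reachable through the default configuration (connection handling and the class name are assumptions; the table name is from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);   // corresponds to the DisableTableProcedure (pid=61)
            admin.deleteTable(tn);    // corresponds to the DeleteTableProcedure (pid=65)
          }
        }
      }
    }

The synchronous calls block while the client polls the master, which is what the repeated "Checking to see if procedure is done" entries from MasterRpcServices above reflect.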
2024-11-23T03:21:50,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:21:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:21:50,351 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:21:50,351 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:50,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-23T03:21:50,352 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:21:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:50,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742068_1244 (size=960) 2024-11-23T03:21:50,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:50,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:50,761 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:21:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742069_1245 (size=53) 2024-11-23T03:21:50,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2aadefa362142abf479e6e4ecc26db15, disabling compactions & flushes 2024-11-23T03:21:51,168 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. after waiting 0 ms 2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:51,168 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
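The create request at 03:21:50,350 specifies three identical column families (A, B and C with VERSIONS => '1') plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', which is what later makes every store open with a CompactingMemStore. A sketch of assembling such a descriptor with the HBase 2.x builder API; only the names and values visible in the log are taken from it, while the class and method names of the sketch itself are illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      static TableDescriptor testAcidGuaranteesDescriptor() {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata from the log: BASIC in-memory (compacting) memstore.
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build());
        }
        return tdb.build();
      }
    }

Passing the result to Admin.createTable(...) yields a CreateTableProcedure like the one tracked above as pid=66.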
2024-11-23T03:21:51,168 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:51,169 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:21:51,169 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332111169"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332111169"}]},"ts":"1732332111169"} 2024-11-23T03:21:51,171 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T03:21:51,171 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:21:51,171 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332111171"}]},"ts":"1732332111171"} 2024-11-23T03:21:51,172 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:21:51,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, ASSIGN}] 2024-11-23T03:21:51,178 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, ASSIGN 2024-11-23T03:21:51,178 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:21:51,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=2aadefa362142abf479e6e4ecc26db15, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:51,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure 2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:21:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:51,482 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:51,485 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
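The MetaTableAccessor Put above shows where region metadata is kept: one row in hbase:meta keyed by the full region name, with the serialized region info and the assignment state stored under the info family, plus a separate row for the table state. A hedged sketch of reading that region row back from a client (the row key and qualifiers are taken from the log; connection handling and the printed output are assumptions):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Row key exactly as logged by MetaTableAccessor(2113) for the new region.
          byte[] row = Bytes.toBytes("TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.");
          Result r = meta.get(new Get(row));
          byte[] regionInfo = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println("regioninfo=" + (regionInfo == null ? 0 : regionInfo.length) + " bytes"
              + ", state=" + (state == null ? "null" : Bytes.toString(state)));
        }
      }
    }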
2024-11-23T03:21:51,485 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:21:51,485 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,485 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:21:51,485 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,485 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,487 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,488 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:51,488 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2aadefa362142abf479e6e4ecc26db15 columnFamilyName A 2024-11-23T03:21:51,489 DEBUG [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:51,489 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(327): Store=2aadefa362142abf479e6e4ecc26db15/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:51,489 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,491 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:51,491 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2aadefa362142abf479e6e4ecc26db15 columnFamilyName B 2024-11-23T03:21:51,501 DEBUG [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:51,501 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(327): Store=2aadefa362142abf479e6e4ecc26db15/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:51,502 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,503 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:21:51,503 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2aadefa362142abf479e6e4ecc26db15 columnFamilyName C 2024-11-23T03:21:51,504 DEBUG [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:21:51,504 INFO [StoreOpener-2aadefa362142abf479e6e4ecc26db15-1 {}] regionserver.HStore(327): Store=2aadefa362142abf479e6e4ecc26db15/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:21:51,504 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:51,505 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,505 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,507 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:21:51,508 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:51,511 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:21:51,511 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened 2aadefa362142abf479e6e4ecc26db15; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68057778, jitterRate=0.014139920473098755}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:21:51,512 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:51,513 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., pid=68, masterSystemTime=1732332111481 2024-11-23T03:21:51,514 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:51,514 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
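Each store above opens as a CompactingMemStore with a 2.00 MB in-memory flush threshold and compactor=BASIC, which is the runtime effect of the 'hbase.hregion.compacting.memstore.type' => 'BASIC' table attribute in the descriptor. The same policy can also be requested per column family; a hedged sketch, assuming the HBase 2.x ColumnFamilyDescriptorBuilder exposes the typed setter (the policy name comes from the log, everything else is an assumption):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
      // Family 'A' with BASIC in-memory compaction, matching the compactor=BASIC reported
      // by CompactingMemStore(122) above; assumes the 2.x client API.
      static ColumnFamilyDescriptor familyA() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .setMaxVersions(1)
            .build();
      }
    }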
2024-11-23T03:21:51,514 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=2aadefa362142abf479e6e4ecc26db15, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:21:51,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-23T03:21:51,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure 2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 in 185 msec 2024-11-23T03:21:51,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-23T03:21:51,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, ASSIGN in 339 msec 2024-11-23T03:21:51,518 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:21:51,518 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332111518"}]},"ts":"1732332111518"} 2024-11-23T03:21:51,519 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:21:51,522 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:21:51,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1720 sec 2024-11-23T03:21:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T03:21:52,457 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-23T03:21:52,458 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64dc42d9 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58341641 2024-11-23T03:21:52,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b6adc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,463 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,465 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,466 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:21:52,467 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36464, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:21:52,469 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-23T03:21:52,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,473 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-23T03:21:52,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-23T03:21:52,480 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,481 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-23T03:21:52,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,485 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-11-23T03:21:52,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,489 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-23T03:21:52,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,494 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-23T03:21:52,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,499 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-23T03:21:52,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,502 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-23T03:21:52,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,508 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-11-23T03:21:52,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:21:52,516 DEBUG [hconnection-0x6c64b47f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,517 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,527 DEBUG [hconnection-0x7c8b27ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,528 DEBUG [hconnection-0x8383a32-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,529 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,529 INFO [RS-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,529 DEBUG [hconnection-0x232c5eea-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:52,530 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-23T03:21:52,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:52,532 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:52,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:52,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:52,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:52,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:52,541 DEBUG [hconnection-0x32a8a0b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,542 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,564 DEBUG 
[hconnection-0x280e126a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,565 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,581 DEBUG [hconnection-0x4f3ad13f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,582 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,588 DEBUG [hconnection-0x3e458b6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,589 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,593 DEBUG [hconnection-0x57865412-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,594 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332172595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332172595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332172597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332172598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,602 DEBUG [hconnection-0x40a08cff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:21:52,603 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:21:52,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332172605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8d5826b42ebd4a289df88aa085767654 is 50, key is test_row_0/A:col10/1732332112531/Put/seqid=0 2024-11-23T03:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:52,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742070_1246 (size=12001) 2024-11-23T03:21:52,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8d5826b42ebd4a289df88aa085767654 2024-11-23T03:21:52,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:52,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
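At 03:21:52,529 the master logged the client request "flush TestAcidGuarantees" and queued FlushTableProcedure pid=69 with FlushRegionProcedure pid=70, while MemStoreFlusher.0 began flushing the region's three column families. For reference only, a table-wide flush like the one requested here can be issued with the standard HBase 2.x Admin API; this is an illustrative sketch under that assumption, not the test's actual code.

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class FlushTestTable {
    public static void main(String[] args) throws IOException {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Asks the master to flush every region of the table; the master drives this
        // through FlushTableProcedure / FlushRegionProcedure, as seen in the log records above.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }
  }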
2024-11-23T03:21:52,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:52,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:52,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:52,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332172699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332172699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332172701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/930de58f875b41db89b094768dd34285 is 50, key is test_row_0/B:col10/1732332112531/Put/seqid=0 2024-11-23T03:21:52,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332172709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332172702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742071_1247 (size=12001) 2024-11-23T03:21:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:52,838 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:52,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:52,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:52,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:52,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332172902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332172904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332172905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332172912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332172914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:52,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:52,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:52,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:52,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:52,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:52,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:52,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:53,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/930de58f875b41db89b094768dd34285 2024-11-23T03:21:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:53,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:53,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:53,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:53,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4e1a8ca0eff044fabf2562cd0138c72c is 50, key is test_row_0/C:col10/1732332112531/Put/seqid=0 2024-11-23T03:21:53,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742072_1248 (size=12001) 2024-11-23T03:21:53,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4e1a8ca0eff044fabf2562cd0138c72c 2024-11-23T03:21:53,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8d5826b42ebd4a289df88aa085767654 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654 2024-11-23T03:21:53,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:21:53,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/930de58f875b41db89b094768dd34285 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285 2024-11-23T03:21:53,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:21:53,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4e1a8ca0eff044fabf2562cd0138c72c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c 2024-11-23T03:21:53,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c, entries=150, sequenceid=13, filesize=11.7 K 2024-11-23T03:21:53,202 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 2aadefa362142abf479e6e4ecc26db15 in 665ms, sequenceid=13, compaction requested=false 2024-11-23T03:21:53,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:53,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:53,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332173213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332173214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332173214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332173216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332173216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/5ad8051bf2b144b9be10e24ad0799475 is 50, key is test_row_0/A:col10/1732332112595/Put/seqid=0 2024-11-23T03:21:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742073_1249 (size=12001) 2024-11-23T03:21:53,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/5ad8051bf2b144b9be10e24ad0799475 2024-11-23T03:21:53,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/52af50c2bda1403a9592d47d647a47bf is 50, key is test_row_0/B:col10/1732332112595/Put/seqid=0 2024-11-23T03:21:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742074_1250 (size=12001) 2024-11-23T03:21:53,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/52af50c2bda1403a9592d47d647a47bf 2024-11-23T03:21:53,298 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:53,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:53,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:53,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:53,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:53,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:53,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/acf3ff1a41ee428587fe501e48609b84 is 50, key is test_row_0/C:col10/1732332112595/Put/seqid=0 2024-11-23T03:21:53,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332173319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332173319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332173321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742075_1251 (size=12001) 2024-11-23T03:21:53,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/acf3ff1a41ee428587fe501e48609b84 2024-11-23T03:21:53,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/5ad8051bf2b144b9be10e24ad0799475 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475 2024-11-23T03:21:53,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T03:21:53,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/52af50c2bda1403a9592d47d647a47bf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf 2024-11-23T03:21:53,358 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T03:21:53,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/acf3ff1a41ee428587fe501e48609b84 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84 2024-11-23T03:21:53,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T03:21:53,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 2aadefa362142abf479e6e4ecc26db15 in 166ms, sequenceid=40, compaction requested=false 2024-11-23T03:21:53,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:53,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:53,453 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:53,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:53,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a77eebc8e08a4d609a24c2806aa5c281 is 50, key is test_row_0/A:col10/1732332113212/Put/seqid=0 2024-11-23T03:21:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742076_1252 (size=12001) 2024-11-23T03:21:53,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:53,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:53,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332173557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332173559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332173561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332173662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332173662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332173664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332173720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332173722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332173866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332173867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,869 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a77eebc8e08a4d609a24c2806aa5c281 2024-11-23T03:21:53,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:53,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332173868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:53,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/def4909d56ef43d79eb09c0bfb1aca20 is 50, key is test_row_0/B:col10/1732332113212/Put/seqid=0 2024-11-23T03:21:53,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742077_1253 (size=12001) 2024-11-23T03:21:54,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332174168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332174169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332174171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,315 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/def4909d56ef43d79eb09c0bfb1aca20 2024-11-23T03:21:54,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ca2e4b9463664ecba139541e57103d6f is 50, key is test_row_0/C:col10/1732332113212/Put/seqid=0 2024-11-23T03:21:54,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742078_1254 (size=12001) 2024-11-23T03:21:54,342 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ca2e4b9463664ecba139541e57103d6f 2024-11-23T03:21:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a77eebc8e08a4d609a24c2806aa5c281 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281 2024-11-23T03:21:54,359 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T03:21:54,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/def4909d56ef43d79eb09c0bfb1aca20 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20 2024-11-23T03:21:54,366 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T03:21:54,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ca2e4b9463664ecba139541e57103d6f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f 2024-11-23T03:21:54,378 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T03:21:54,379 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 2aadefa362142abf479e6e4ecc26db15 in 925ms, sequenceid=49, compaction requested=true 2024-11-23T03:21:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
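[Editor's note] The long run of RegionTooBusyException entries above comes from HRegion.checkResources rejecting Mutate calls while the region's memstore sits above its blocking size; in HBase that blocking size is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, and the "Over memstore limit=512.0 K" figure in this log points to a deliberately small flush size in the test setup. The sketch below is a rough illustration only, not the actual TestAcidGuarantees harness: the class name, config values, and retry policy are assumptions, while the table name, row key, and column family come from the log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: writes are blocked once the memstore reaches
    // flush.size * block.multiplier (128 KB * 4 = 512 KB here), matching the
    // "Over memstore limit=512.0 K" condition seen in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }

  // Retry a put with simple exponential backoff when the region rejects it
  // because its memstore is over the blocking limit. Depending on client
  // retry settings, the busy-region error may instead surface wrapped in a
  // retries-exhausted exception after the client's own retries are used up.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000);
      }
    }
    throw new IOException("region still too busy after retries");
  }
}
```

Once the flush above commits its three store files (A, B, C) and frees the memstore, the blocked writers stop seeing RegionTooBusyException until the next burst pushes the region over the limit again.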
2024-11-23T03:21:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T03:21:54,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-23T03:21:54,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-23T03:21:54,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8490 sec 2024-11-23T03:21:54,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.8550 sec 2024-11-23T03:21:54,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T03:21:54,637 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-23T03:21:54,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:54,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-23T03:21:54,640 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:54,641 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:54,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:54,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:54,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:54,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:54,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:54,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/c441600ea7744637a3340c26d7a41bd5 is 50, key is test_row_0/A:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:54,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332174683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332174685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332174685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742079_1255 (size=14341) 2024-11-23T03:21:54,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332174724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332174727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:54,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332174786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332174788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332174789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,793 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:54,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:54,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:54,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:54,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
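[Editor's note] The pid=72 failure above is the expected hand-off between two overlapping flushes: the region is already being flushed by MemStoreFlusher, so FlushRegionCallable reports "Unable to complete flush ... as already flushing", the master marks the remote procedure failed, and, as the following entries show, re-dispatches pid=72 until it can run. The whole chain is driven by an admin-initiated table flush like the one logged earlier as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal client-side sketch of that call follows; the table name is taken from the log, everything else is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a FlushTableProcedure on the master, which fans out a
      // FlushRegionProcedure per region (the pid=69/70 and pid=71/72 pairs
      // in this log) and waits for the procedure to complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```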
2024-11-23T03:21:54,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:54,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:54,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:54,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:54,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:54,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:54,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:54,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:54,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332174991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332174991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:54,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:54,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332174994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/c441600ea7744637a3340c26d7a41bd5 2024-11-23T03:21:55,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/1cad97051bab4b1ba94022955b4ad319 is 50, key is test_row_0/B:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:55,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742080_1256 (size=12001) 2024-11-23T03:21:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:55,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:55,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332175294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332175294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332175296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:21:55,406 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/1cad97051bab4b1ba94022955b4ad319 2024-11-23T03:21:55,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/f1c2ddf85c4c487e83dd6f21b3e201e4 is 50, key is test_row_0/C:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:55,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742081_1257 (size=12001) 2024-11-23T03:21:55,559 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:55,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:55,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332175800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332175800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332175805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:55,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:55,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:55,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:55,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:55,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:55,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:55,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/f1c2ddf85c4c487e83dd6f21b3e201e4 2024-11-23T03:21:55,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/c441600ea7744637a3340c26d7a41bd5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5 2024-11-23T03:21:55,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5, entries=200, sequenceid=77, filesize=14.0 K 2024-11-23T03:21:55,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/1cad97051bab4b1ba94022955b4ad319 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319 2024-11-23T03:21:55,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:21:55,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/f1c2ddf85c4c487e83dd6f21b3e201e4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4 2024-11-23T03:21:55,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:21:55,954 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 2aadefa362142abf479e6e4ecc26db15 in 1279ms, sequenceid=77, compaction requested=true 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:55,954 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:55,954 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:55,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:55,955 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:55,956 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:21:55,956 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:55,956 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=49.2 K 2024-11-23T03:21:55,956 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:55,956 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:21:55,956 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:55,956 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=46.9 K 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d5826b42ebd4a289df88aa085767654, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332112531 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 930de58f875b41db89b094768dd34285, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332112531 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ad8051bf2b144b9be10e24ad0799475, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, 
earliestPutTs=1732332112592 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 52af50c2bda1403a9592d47d647a47bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732332112592 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting def4909d56ef43d79eb09c0bfb1aca20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732332113212 2024-11-23T03:21:55,957 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting a77eebc8e08a4d609a24c2806aa5c281, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732332113212 2024-11-23T03:21:55,958 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cad97051bab4b1ba94022955b4ad319, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:55,958 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c441600ea7744637a3340c26d7a41bd5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:55,971 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#210 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:55,971 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/d7b0db9a26244aabbc85d02f5151f67e is 50, key is test_row_0/A:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:55,976 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#211 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:55,977 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/fa4a7fceecff4bbfa1e8f719c4c7c243 is 50, key is test_row_0/B:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742082_1258 (size=12139) 2024-11-23T03:21:56,007 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/d7b0db9a26244aabbc85d02f5151f67e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/d7b0db9a26244aabbc85d02f5151f67e 2024-11-23T03:21:56,014 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into d7b0db9a26244aabbc85d02f5151f67e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:56,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:56,014 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=12, startTime=1732332115954; duration=0sec 2024-11-23T03:21:56,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:56,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:21:56,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:56,015 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:56,016 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:21:56,016 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:56,016 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=46.9 K 2024-11-23T03:21:56,016 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e1a8ca0eff044fabf2562cd0138c72c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732332112531 2024-11-23T03:21:56,017 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting acf3ff1a41ee428587fe501e48609b84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732332112592 2024-11-23T03:21:56,017 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca2e4b9463664ecba139541e57103d6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732332113212 2024-11-23T03:21:56,018 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1c2ddf85c4c487e83dd6f21b3e201e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:56,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T03:21:56,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:56,019 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T03:21:56,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:56,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742083_1259 (size=12139) 2024-11-23T03:21:56,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/0ec0e58778cd4a6b939610f267af07df is 50, key is test_row_0/A:col10/1732332114677/Put/seqid=0 2024-11-23T03:21:56,035 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/fa4a7fceecff4bbfa1e8f719c4c7c243 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/fa4a7fceecff4bbfa1e8f719c4c7c243 2024-11-23T03:21:56,042 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into fa4a7fceecff4bbfa1e8f719c4c7c243(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:56,042 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:56,042 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=12, startTime=1732332115954; duration=0sec 2024-11-23T03:21:56,042 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:56,042 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:21:56,043 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#213 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:56,044 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1069d1cc81e04eb39f5fb286186e5859 is 50, key is test_row_0/C:col10/1732332113559/Put/seqid=0 2024-11-23T03:21:56,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742084_1260 (size=12001) 2024-11-23T03:21:56,054 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/0ec0e58778cd4a6b939610f267af07df 2024-11-23T03:21:56,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742085_1261 (size=12139) 2024-11-23T03:21:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/36d2d176978f404e980bc5fffa191907 is 50, key is test_row_0/B:col10/1732332114677/Put/seqid=0 2024-11-23T03:21:56,076 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1069d1cc81e04eb39f5fb286186e5859 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1069d1cc81e04eb39f5fb286186e5859 2024-11-23T03:21:56,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742086_1262 (size=12001) 2024-11-23T03:21:56,080 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/36d2d176978f404e980bc5fffa191907 2024-11-23T03:21:56,084 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 1069d1cc81e04eb39f5fb286186e5859(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:56,084 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:56,084 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=12, startTime=1732332115954; duration=0sec 2024-11-23T03:21:56,084 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:56,085 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:21:56,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/355160e8256342e5b293a9e554045358 is 50, key is test_row_0/C:col10/1732332114677/Put/seqid=0 2024-11-23T03:21:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742087_1263 (size=12001) 2024-11-23T03:21:56,523 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/355160e8256342e5b293a9e554045358 2024-11-23T03:21:56,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/0ec0e58778cd4a6b939610f267af07df as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df 2024-11-23T03:21:56,540 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df, entries=150, sequenceid=87, 
filesize=11.7 K 2024-11-23T03:21:56,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/36d2d176978f404e980bc5fffa191907 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907 2024-11-23T03:21:56,548 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907, entries=150, sequenceid=87, filesize=11.7 K 2024-11-23T03:21:56,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/355160e8256342e5b293a9e554045358 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358 2024-11-23T03:21:56,557 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358, entries=150, sequenceid=87, filesize=11.7 K 2024-11-23T03:21:56,557 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 2aadefa362142abf479e6e4ecc26db15 in 538ms, sequenceid=87, compaction requested=false 2024-11-23T03:21:56,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:56,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:56,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T03:21:56,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-23T03:21:56,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-23T03:21:56,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9180 sec 2024-11-23T03:21:56,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.9220 sec 2024-11-23T03:21:56,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:56,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:56,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T03:21:56,746 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-23T03:21:56,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:21:56,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1dae759dd154459fb1c04669a5ef5d2b is 50, key is test_row_0/A:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:56,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-23T03:21:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:56,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:21:56,751 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:21:56,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:21:56,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742088_1264 (size=12001) 2024-11-23T03:21:56,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1dae759dd154459fb1c04669a5ef5d2b 2024-11-23T03:21:56,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9342d5bfdac64a69b8a4d0785775a5e8 is 50, key is test_row_0/B:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:56,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742089_1265 (size=12001) 2024-11-23T03:21:56,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9342d5bfdac64a69b8a4d0785775a5e8 2024-11-23T03:21:56,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/2d5c011b9fcc4df9b5a22b433b0333cd is 50, key is test_row_0/C:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:56,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332176787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332176789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742090_1266 (size=12001) 2024-11-23T03:21:56,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/2d5c011b9fcc4df9b5a22b433b0333cd 2024-11-23T03:21:56,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1dae759dd154459fb1c04669a5ef5d2b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b 2024-11-23T03:21:56,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b, entries=150, sequenceid=100, filesize=11.7 K 2024-11-23T03:21:56,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9342d5bfdac64a69b8a4d0785775a5e8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8 2024-11-23T03:21:56,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332176806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8, entries=150, sequenceid=100, filesize=11.7 K 2024-11-23T03:21:56,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/2d5c011b9fcc4df9b5a22b433b0333cd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd 2024-11-23T03:21:56,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332176808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332176809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd, entries=150, sequenceid=100, filesize=11.7 K 2024-11-23T03:21:56,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2aadefa362142abf479e6e4ecc26db15 in 72ms, sequenceid=100, compaction requested=true 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:56,815 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:56,815 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:56,824 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:56,824 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:56,824 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:21:56,824 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:21:56,824 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:56,825 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:56,825 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/d7b0db9a26244aabbc85d02f5151f67e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.3 K 2024-11-23T03:21:56,825 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/fa4a7fceecff4bbfa1e8f719c4c7c243, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.3 K 2024-11-23T03:21:56,825 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7b0db9a26244aabbc85d02f5151f67e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:56,825 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fa4a7fceecff4bbfa1e8f719c4c7c243, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:56,826 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 0ec0e58778cd4a6b939610f267af07df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732332114677 2024-11-23T03:21:56,826 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dae759dd154459fb1c04669a5ef5d2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:56,826 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 36d2d176978f404e980bc5fffa191907, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732332114677 2024-11-23T03:21:56,827 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9342d5bfdac64a69b8a4d0785775a5e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:56,835 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:56,835 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9533907c63f54da8a04f2b7c9cbab514 is 50, key is test_row_0/A:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:56,837 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:56,837 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/cda5dcab369b48fa8d7a679cdebe959e is 50, key is test_row_0/B:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:56,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:56,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742091_1267 (size=12241) 2024-11-23T03:21:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742092_1268 (size=12241) 2024-11-23T03:21:56,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:56,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:21:56,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:56,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:56,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:56,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:56,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a7e4370c277e4b6db785e7f3ae58c41b is 50, key is test_row_0/A:col10/1732332116893/Put/seqid=0 2024-11-23T03:21:56,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:56,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:56,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
as already flushing 2024-11-23T03:21:56,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:56,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:56,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:56,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:56,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742093_1269 (size=12001) 2024-11-23T03:21:56,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a7e4370c277e4b6db785e7f3ae58c41b 2024-11-23T03:21:56,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332176904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:56,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332176906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:56,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a8ff111eff984f51b05a42ccf8930563 is 50, key is test_row_0/B:col10/1732332116893/Put/seqid=0 2024-11-23T03:21:56,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742094_1270 (size=12001) 2024-11-23T03:21:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332177009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332177011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:57,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:57,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,209 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332177211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332177214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,269 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/cda5dcab369b48fa8d7a679cdebe959e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/cda5dcab369b48fa8d7a679cdebe959e 2024-11-23T03:21:57,276 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into cda5dcab369b48fa8d7a679cdebe959e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:57,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:57,276 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332116815; duration=0sec 2024-11-23T03:21:57,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:57,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:21:57,276 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:57,278 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9533907c63f54da8a04f2b7c9cbab514 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9533907c63f54da8a04f2b7c9cbab514 2024-11-23T03:21:57,278 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:57,278 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:21:57,279 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:57,279 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1069d1cc81e04eb39f5fb286186e5859, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.3 K 2024-11-23T03:21:57,279 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1069d1cc81e04eb39f5fb286186e5859, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332113553 2024-11-23T03:21:57,281 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 355160e8256342e5b293a9e554045358, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732332114677 2024-11-23T03:21:57,281 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d5c011b9fcc4df9b5a22b433b0333cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:57,285 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 9533907c63f54da8a04f2b7c9cbab514(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:57,286 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:57,286 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332116815; duration=0sec 2024-11-23T03:21:57,286 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:57,286 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:21:57,292 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#223 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:57,293 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/732838f49ff243c2af5ac555ac1fe557 is 50, key is test_row_0/C:col10/1732332116742/Put/seqid=0 2024-11-23T03:21:57,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742095_1271 (size=12241) 2024-11-23T03:21:57,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a8ff111eff984f51b05a42ccf8930563 2024-11-23T03:21:57,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8cdb0a36052549dbb6ce292e30db4102 is 50, key is test_row_0/C:col10/1732332116893/Put/seqid=0 2024-11-23T03:21:57,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:57,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742096_1272 (size=12001) 2024-11-23T03:21:57,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8cdb0a36052549dbb6ce292e30db4102 2024-11-23T03:21:57,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:57,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a7e4370c277e4b6db785e7f3ae58c41b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b 2024-11-23T03:21:57,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T03:21:57,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a8ff111eff984f51b05a42ccf8930563 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563 2024-11-23T03:21:57,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T03:21:57,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8cdb0a36052549dbb6ce292e30db4102 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102 2024-11-23T03:21:57,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T03:21:57,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 2aadefa362142abf479e6e4ecc26db15 in 499ms, sequenceid=127, compaction requested=false 2024-11-23T03:21:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:57,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:21:57,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:57,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:57,525 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:57,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a2201dc0a6ff4f2ab814f26406b8dbc7 is 50, key is test_row_0/A:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:57,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742097_1273 (size=12151) 2024-11-23T03:21:57,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a2201dc0a6ff4f2ab814f26406b8dbc7 2024-11-23T03:21:57,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/956e42e3cc4941c7b2053e5559bf4be0 is 50, key is test_row_0/B:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:57,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332177637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332177643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742098_1274 (size=12151) 2024-11-23T03:21:57,679 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,705 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/732838f49ff243c2af5ac555ac1fe557 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/732838f49ff243c2af5ac555ac1fe557 2024-11-23T03:21:57,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 732838f49ff243c2af5ac555ac1fe557(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:57,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:57,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=13, startTime=1732332116815; duration=0sec 2024-11-23T03:21:57,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:57,714 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:21:57,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332177748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332177753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
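[Editor's note, not part of the captured log] The write pressure visible here comes from many small single-Put Mutate RPCs arriving faster than the region can flush. As a loosely related client-side illustration only (it does not change the server-side memstore limit), batching writes through BufferedMutator turns many small RPCs into fewer, larger ones; the buffer size below is an example value, not anything taken from this test.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;

final class BatchedWrites {
  // Sketch: buffer Puts locally and send them in batches.
  static void writeBatch(Connection conn, List<Put> puts) throws IOException {
    BufferedMutatorParams params =
        new BufferedMutatorParams(TableName.valueOf("TestAcidGuarantees"))
            .writeBufferSize(2 * 1024 * 1024);   // 2 MB client-side buffer (example value)
    try (BufferedMutator mutator = conn.getBufferedMutator(params)) {
      for (Put p : puts) {
        mutator.mutate(p);                       // buffered locally, not yet sent
      }
      mutator.flush();                           // ship the buffered mutations in bulk
    }
  }
}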
2024-11-23T03:21:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:57,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332177955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:57,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332177965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:57,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:57,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:57,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/956e42e3cc4941c7b2053e5559bf4be0 2024-11-23T03:21:58,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1f5fe6bf788548b2a76b3fe3303bf460 is 50, key is test_row_0/C:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:58,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742099_1275 (size=12151) 2024-11-23T03:21:58,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:58,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
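[Editor's note, not part of the captured log] The 512 KB ceiling in the RegionTooBusyException messages is the per-region blocking memstore size, which HBase normally derives as the configured flush size times the block multiplier; this test evidently runs with a very small flush size. The snippet below only illustrates how those two settings combine; the 128 KB / 4x numbers are hypothetical values chosen to reproduce the 512 KB figure seen in the log, not this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush threshold (hypothetical)
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);      // blocking multiplier (hypothetical)

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to a region are rejected with RegionTooBusyException once its memstore
    // exceeds roughly flushSize * multiplier (512 KB with the values above).
    System.out.println("approx. blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}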
2024-11-23T03:21:58,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1f5fe6bf788548b2a76b3fe3303bf460 2024-11-23T03:21:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/a2201dc0a6ff4f2ab814f26406b8dbc7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7 2024-11-23T03:21:58,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7, entries=150, sequenceid=140, filesize=11.9 K 2024-11-23T03:21:58,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/956e42e3cc4941c7b2053e5559bf4be0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0 2024-11-23T03:21:58,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0, entries=150, sequenceid=140, filesize=11.9 K 2024-11-23T03:21:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1f5fe6bf788548b2a76b3fe3303bf460 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460 2024-11-23T03:21:58,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460, entries=150, sequenceid=140, filesize=11.9 K 2024-11-23T03:21:58,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 2aadefa362142abf479e6e4ecc26db15 in 665ms, sequenceid=140, compaction requested=true 2024-11-23T03:21:58,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:58,188 DEBUG 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:58,189 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:58,190 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:21:58,190 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,190 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9533907c63f54da8a04f2b7c9cbab514, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.5 K 2024-11-23T03:21:58,190 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9533907c63f54da8a04f2b7c9cbab514, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:58,191 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7e4370c277e4b6db785e7f3ae58c41b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732332116788 2024-11-23T03:21:58,191 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2201dc0a6ff4f2ab814f26406b8dbc7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:21:58,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:58,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:58,191 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:58,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:58,193 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 
after considering 1 permutations with 1 in ratio 2024-11-23T03:21:58,193 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:21:58,193 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,193 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/cda5dcab369b48fa8d7a679cdebe959e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.5 K 2024-11-23T03:21:58,195 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cda5dcab369b48fa8d7a679cdebe959e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:58,196 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a8ff111eff984f51b05a42ccf8930563, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732332116788 2024-11-23T03:21:58,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:58,196 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 956e42e3cc4941c7b2053e5559bf4be0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:21:58,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:58,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:58,215 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:58,216 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b09a80ede2144376adc5441f402e2ecb is 50, key is test_row_0/A:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:58,223 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:58,224 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/c94ddfa363bf4c0a9d314978749ba391 is 50, key is test_row_0/B:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:58,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T03:21:58,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:58,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:58,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742100_1276 (size=12493) 2024-11-23T03:21:58,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8ba414dceb4c4b04aaffea30a32c7935 is 50, key is test_row_0/A:col10/1732332117621/Put/seqid=0 2024-11-23T03:21:58,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742101_1277 (size=12493) 2024-11-23T03:21:58,290 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b09a80ede2144376adc5441f402e2ecb as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b09a80ede2144376adc5441f402e2ecb 2024-11-23T03:21:58,298 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into b09a80ede2144376adc5441f402e2ecb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:21:58,298 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:58,298 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332118188; duration=0sec 2024-11-23T03:21:58,299 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:58,299 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:21:58,299 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:21:58,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:21:58,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:21:58,301 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:58,301 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/732838f49ff243c2af5ac555ac1fe557, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=35.5 K 2024-11-23T03:21:58,302 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/c94ddfa363bf4c0a9d314978749ba391 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/c94ddfa363bf4c0a9d314978749ba391 2024-11-23T03:21:58,302 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 732838f49ff243c2af5ac555ac1fe557, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732332116736 2024-11-23T03:21:58,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332178298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,304 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cdb0a36052549dbb6ce292e30db4102, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732332116788 2024-11-23T03:21:58,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:21:58,305 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f5fe6bf788548b2a76b3fe3303bf460, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:21:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332178300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,311 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into c94ddfa363bf4c0a9d314978749ba391(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
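[Editorial sketch, not part of the captured log] The rejected Mutate calls above originate from ordinary client puts against the TestAcidGuarantees table (the server-side stacks end in HTable.put via RSRpcServices.mutate). Below is a minimal, self-contained sketch of such a writer; the table name, row key, column family, and qualifier are taken from the log, while the value bytes are an assumption.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    table.put(put);
                } catch (RegionTooBusyException e) {
                    // Reaches the caller only after the HBase client has exhausted its own
                    // retries against the blocked region; back off before resubmitting.
                }
            }
        }
    }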
2024-11-23T03:21:58,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:58,311 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332118191; duration=0sec 2024-11-23T03:21:58,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:58,311 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:21:58,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742102_1278 (size=14541) 2024-11-23T03:21:58,346 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:58,347 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/3acd7812c4ac416cad64fe357e77d4bb is 50, key is test_row_0/C:col10/1732332117522/Put/seqid=0 2024-11-23T03:21:58,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742103_1279 (size=12493) 2024-11-23T03:21:58,408 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/3acd7812c4ac416cad64fe357e77d4bb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/3acd7812c4ac416cad64fe357e77d4bb 2024-11-23T03:21:58,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332178404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332178409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,413 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 3acd7812c4ac416cad64fe357e77d4bb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:21:58,413 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:58,414 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=13, startTime=1732332118196; duration=0sec 2024-11-23T03:21:58,414 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:58,414 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:21:58,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:58,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
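[Editorial sketch, not part of the captured log] Further down, the client-side entries (RpcRetryingCallerImpl: "tries=6, retries=16, started=4134 ms ago") show the standard HBase retry loop absorbing these rejections before anything surfaces to the test threads. The sketch below sets the two client settings that govern that loop; the retry count matches the "retries=16" visible in the log, and the 100 ms base pause is an assumption (it is the HBase default).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class ClientRetrySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Maximum number of retries per operation, as reported by RpcRetryingCallerImpl below.
            conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 16);
            // Base pause between retries in milliseconds (backoff grows from this value).
            conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100L);
        }
    }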
2024-11-23T03:21:58,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,612 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332178613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332178615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8ba414dceb4c4b04aaffea30a32c7935 2024-11-23T03:21:58,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/47e84e481ea444af9258755d8111fa48 is 50, key is test_row_0/B:col10/1732332117621/Put/seqid=0 2024-11-23T03:21:58,767 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:21:58,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:21:58,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742104_1280 (size=12151) 2024-11-23T03:21:58,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/47e84e481ea444af9258755d8111fa48 2024-11-23T03:21:58,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b47b9b9a38434a3392c7d85063ab4b7a is 50, key is test_row_0/C:col10/1732332117621/Put/seqid=0 2024-11-23T03:21:58,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332178813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,818 DEBUG [Thread-1132 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:58,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332178826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,829 DEBUG [Thread-1140 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:58,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:58,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332178831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,834 DEBUG [Thread-1136 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:21:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:21:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742105_1281 (size=12151) 2024-11-23T03:21:58,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b47b9b9a38434a3392c7d85063ab4b7a 2024-11-23T03:21:58,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/8ba414dceb4c4b04aaffea30a32c7935 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935 2024-11-23T03:21:58,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935, entries=200, sequenceid=168, filesize=14.2 K 2024-11-23T03:21:58,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/47e84e481ea444af9258755d8111fa48 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48 2024-11-23T03:21:58,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48, entries=150, sequenceid=168, filesize=11.9 K 2024-11-23T03:21:58,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b47b9b9a38434a3392c7d85063ab4b7a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a 2024-11-23T03:21:58,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a, entries=150, sequenceid=168, filesize=11.9 K 2024-11-23T03:21:58,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 2aadefa362142abf479e6e4ecc26db15 in 622ms, sequenceid=168, compaction requested=false 
2024-11-23T03:21:58,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:58,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:21:58,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:58,923 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:58,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:58,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
as already flushing 2024-11-23T03:21:58,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:58,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ae1e0df6bd2545fb8ff1617bedd8f279 is 50, key is test_row_0/A:col10/1732332118295/Put/seqid=0 2024-11-23T03:21:58,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742106_1282 (size=12151) 2024-11-23T03:21:58,998 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ae1e0df6bd2545fb8ff1617bedd8f279 2024-11-23T03:21:59,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/af781bb66538496b8ae3d9bc710e0ba5 is 50, key is test_row_0/B:col10/1732332118295/Put/seqid=0 2024-11-23T03:21:59,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332179025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742107_1283 (size=12151) 2024-11-23T03:21:59,030 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/af781bb66538496b8ae3d9bc710e0ba5 2024-11-23T03:21:59,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332179029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4bfb11f037454e5481aa85471a50ea49 is 50, key is test_row_0/C:col10/1732332118295/Put/seqid=0 2024-11-23T03:21:59,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742108_1284 (size=12151) 2024-11-23T03:21:59,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332179130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332179137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332179339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332179343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4bfb11f037454e5481aa85471a50ea49 2024-11-23T03:21:59,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ae1e0df6bd2545fb8ff1617bedd8f279 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279 2024-11-23T03:21:59,502 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279, entries=150, sequenceid=180, filesize=11.9 K 2024-11-23T03:21:59,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/af781bb66538496b8ae3d9bc710e0ba5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5 2024-11-23T03:21:59,509 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5, entries=150, sequenceid=180, filesize=11.9 K 2024-11-23T03:21:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/4bfb11f037454e5481aa85471a50ea49 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49 2024-11-23T03:21:59,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49, entries=150, sequenceid=180, filesize=11.9 K 2024-11-23T03:21:59,527 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 2aadefa362142abf479e6e4ecc26db15 in 604ms, sequenceid=180, compaction requested=true 2024-11-23T03:21:59,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:59,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:21:59,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-23T03:21:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-23T03:21:59,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-23T03:21:59,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7780 sec 2024-11-23T03:21:59,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 2.7910 sec 2024-11-23T03:21:59,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:59,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-23T03:21:59,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:59,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9cc8df74069d40ce9df51ec7a7bab6ca is 50, key is test_row_0/A:col10/1732332119024/Put/seqid=0 2024-11-23T03:21:59,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332179662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332179665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742109_1285 (size=14541) 2024-11-23T03:21:59,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9cc8df74069d40ce9df51ec7a7bab6ca 2024-11-23T03:21:59,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/91dfc198a11840c0a09458e3f54edc72 is 50, key is test_row_0/B:col10/1732332119024/Put/seqid=0 2024-11-23T03:21:59,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742110_1286 (size=12151) 2024-11-23T03:21:59,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/91dfc198a11840c0a09458e3f54edc72 2024-11-23T03:21:59,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b6d872e7a8494290be928c898ab45aab is 50, key is test_row_0/C:col10/1732332119024/Put/seqid=0 2024-11-23T03:21:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332179767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:21:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332179770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:21:59,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742111_1287 (size=12151) 2024-11-23T03:21:59,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b6d872e7a8494290be928c898ab45aab 2024-11-23T03:21:59,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9cc8df74069d40ce9df51ec7a7bab6ca as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca 2024-11-23T03:21:59,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca, entries=200, sequenceid=209, filesize=14.2 K 2024-11-23T03:21:59,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/91dfc198a11840c0a09458e3f54edc72 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72 2024-11-23T03:21:59,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72, entries=150, sequenceid=209, filesize=11.9 K 2024-11-23T03:21:59,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b6d872e7a8494290be928c898ab45aab as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab 2024-11-23T03:21:59,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab, entries=150, sequenceid=209, filesize=11.9 K 2024-11-23T03:21:59,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 2aadefa362142abf479e6e4ecc26db15 in 191ms, sequenceid=209, compaction requested=true 2024-11-23T03:21:59,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:21:59,842 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:21:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:59,843 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:21:59,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:21:59,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:21:59,844 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:59,844 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:21:59,844 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:59,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:21:59,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:21:59,844 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b09a80ede2144376adc5441f402e2ecb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=52.5 K 2024-11-23T03:21:59,845 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:21:59,845 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:21:59,845 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:21:59,845 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/c94ddfa363bf4c0a9d314978749ba391, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=47.8 K 2024-11-23T03:21:59,845 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b09a80ede2144376adc5441f402e2ecb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:21:59,846 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c94ddfa363bf4c0a9d314978749ba391, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:21:59,846 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ba414dceb4c4b04aaffea30a32c7935, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732332117620 2024-11-23T03:21:59,846 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae1e0df6bd2545fb8ff1617bedd8f279, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732332118286 2024-11-23T03:21:59,846 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 47e84e481ea444af9258755d8111fa48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732332117620 2024-11-23T03:21:59,846 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cc8df74069d40ce9df51ec7a7bab6ca, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119007 2024-11-23T03:21:59,847 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting af781bb66538496b8ae3d9bc710e0ba5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732332118286 2024-11-23T03:21:59,847 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 91dfc198a11840c0a09458e3f54edc72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119019 2024-11-23T03:21:59,864 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#240 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:59,864 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/3895135bbeb5435bb85ec84a84e61a37 is 50, key is test_row_0/B:col10/1732332119024/Put/seqid=0 2024-11-23T03:21:59,865 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#241 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:21:59,869 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/7451f7ef75144fc2ad1c3af950a6bc43 is 50, key is test_row_0/A:col10/1732332119024/Put/seqid=0 2024-11-23T03:21:59,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742112_1288 (size=12629) 2024-11-23T03:21:59,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742113_1289 (size=12629) 2024-11-23T03:21:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:21:59,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:21:59,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:21:59,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:59,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:21:59,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:21:59,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:21:59,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:00,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/332a69392ce949de861b8b57101a93be is 50, key is test_row_0/A:col10/1732332119981/Put/seqid=0 2024-11-23T03:22:00,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742114_1290 (size=9757) 2024-11-23T03:22:00,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332180074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332180082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332180183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332180186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,308 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/3895135bbeb5435bb85ec84a84e61a37 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/3895135bbeb5435bb85ec84a84e61a37 2024-11-23T03:22:00,315 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 3895135bbeb5435bb85ec84a84e61a37(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:00,315 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:00,315 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=12, startTime=1732332119843; duration=0sec 2024-11-23T03:22:00,315 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:00,315 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:00,315 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:00,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:00,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:00,320 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:00,321 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/3acd7812c4ac416cad64fe357e77d4bb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=47.8 K 2024-11-23T03:22:00,321 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3acd7812c4ac416cad64fe357e77d4bb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732332116899 2024-11-23T03:22:00,323 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b47b9b9a38434a3392c7d85063ab4b7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732332117620 2024-11-23T03:22:00,323 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bfb11f037454e5481aa85471a50ea49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=180, earliestPutTs=1732332118286 2024-11-23T03:22:00,324 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/7451f7ef75144fc2ad1c3af950a6bc43 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7451f7ef75144fc2ad1c3af950a6bc43 2024-11-23T03:22:00,325 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b6d872e7a8494290be928c898ab45aab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119019 2024-11-23T03:22:00,334 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 7451f7ef75144fc2ad1c3af950a6bc43(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:00,334 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:00,334 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=12, startTime=1732332119842; duration=0sec 2024-11-23T03:22:00,334 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:00,334 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:00,344 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:00,345 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/cd75c8b2740e409b9d67bbee27d5ee8e is 50, key is test_row_0/C:col10/1732332119024/Put/seqid=0 2024-11-23T03:22:00,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332180387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332180389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742115_1291 (size=12629) 2024-11-23T03:22:00,406 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/cd75c8b2740e409b9d67bbee27d5ee8e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/cd75c8b2740e409b9d67bbee27d5ee8e 2024-11-23T03:22:00,412 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into cd75c8b2740e409b9d67bbee27d5ee8e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:00,412 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:00,412 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=12, startTime=1732332119844; duration=0sec 2024-11-23T03:22:00,413 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:00,413 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:00,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/332a69392ce949de861b8b57101a93be 2024-11-23T03:22:00,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/440cffca44614b1e930eaea478d4487f is 50, key is test_row_0/B:col10/1732332119981/Put/seqid=0 2024-11-23T03:22:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742116_1292 (size=9757) 2024-11-23T03:22:00,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/440cffca44614b1e930eaea478d4487f 2024-11-23T03:22:00,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c03ca5d182684fe0a23e47819f89b1bb is 50, key is test_row_0/C:col10/1732332119981/Put/seqid=0 2024-11-23T03:22:00,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742117_1293 (size=9757) 2024-11-23T03:22:00,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332180691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:00,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332180694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:00,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T03:22:00,855 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-23T03:22:00,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:00,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-23T03:22:00,859 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:00,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:00,860 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:00,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:00,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c03ca5d182684fe0a23e47819f89b1bb 2024-11-23T03:22:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:00,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/332a69392ce949de861b8b57101a93be as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be 2024-11-23T03:22:00,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be, entries=100, sequenceid=221, filesize=9.5 K 2024-11-23T03:22:00,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/440cffca44614b1e930eaea478d4487f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f 2024-11-23T03:22:00,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f, entries=100, sequenceid=221, filesize=9.5 K 2024-11-23T03:22:00,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c03ca5d182684fe0a23e47819f89b1bb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb 2024-11-23T03:22:00,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb, entries=100, sequenceid=221, filesize=9.5 K 2024-11-23T03:22:01,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2aadefa362142abf479e6e4ecc26db15 in 1011ms, sequenceid=221, compaction requested=false 2024-11-23T03:22:01,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:01,011 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T03:22:01,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:01,012 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:01,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:01,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b9a92a86b15b4bcfbfb6c4c26884800b is 50, key is test_row_0/A:col10/1732332120070/Put/seqid=0 2024-11-23T03:22:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742118_1294 (size=12151) 2024-11-23T03:22:01,071 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b9a92a86b15b4bcfbfb6c4c26884800b 2024-11-23T03:22:01,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9c795ef44bf845ffbf912ae3dc04e350 is 50, key is test_row_0/B:col10/1732332120070/Put/seqid=0 2024-11-23T03:22:01,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742119_1295 (size=12151) 2024-11-23T03:22:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:01,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:01,202 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:01,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332181219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332181224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332181326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332181329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:01,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332181532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332181533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,552 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9c795ef44bf845ffbf912ae3dc04e350 2024-11-23T03:22:01,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b1e5eee038574d11ae992193c960732a is 50, key is test_row_0/C:col10/1732332120070/Put/seqid=0 2024-11-23T03:22:01,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742120_1296 (size=12151) 2024-11-23T03:22:01,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332181838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332181840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:01,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:02,020 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b1e5eee038574d11ae992193c960732a 2024-11-23T03:22:02,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b9a92a86b15b4bcfbfb6c4c26884800b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b 2024-11-23T03:22:02,032 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:22:02,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/9c795ef44bf845ffbf912ae3dc04e350 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350 2024-11-23T03:22:02,045 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:22:02,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b1e5eee038574d11ae992193c960732a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a 2024-11-23T03:22:02,052 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:22:02,053 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 2aadefa362142abf479e6e4ecc26db15 in 1041ms, sequenceid=248, compaction requested=true 2024-11-23T03:22:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:02,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-23T03:22:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-23T03:22:02,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-23T03:22:02,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1940 sec 2024-11-23T03:22:02,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.2010 sec 2024-11-23T03:22:02,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:02,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:02,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:02,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/2ecef38eb9e74b59a06a112923f334e5 is 50, key is test_row_0/A:col10/1732332121222/Put/seqid=0 2024-11-23T03:22:02,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742121_1297 (size=14691) 2024-11-23T03:22:02,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/2ecef38eb9e74b59a06a112923f334e5 2024-11-23T03:22:02,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/46b98dbddd6941b8a96a473086a49762 is 50, key is test_row_0/B:col10/1732332121222/Put/seqid=0 2024-11-23T03:22:02,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332182427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332182428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742122_1298 (size=12251) 2024-11-23T03:22:02,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332182540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332182540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332182748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332182750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332182840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,845 DEBUG [Thread-1132 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:02,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332182846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,849 DEBUG [Thread-1140 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:02,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:02,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332182853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:02,857 DEBUG [Thread-1136 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:02,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/46b98dbddd6941b8a96a473086a49762 2024-11-23T03:22:02,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8f14bc556633431db5982c2b08351e56 is 50, key is test_row_0/C:col10/1732332121222/Put/seqid=0 2024-11-23T03:22:02,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742123_1299 (size=12251) 2024-11-23T03:22:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T03:22:02,967 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-23T03:22:02,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-23T03:22:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T03:22:02,977 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:02,982 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:02,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:03,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332183054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332183058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T03:22:03,135 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T03:22:03,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:03,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:03,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:03,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
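The repeated RegionTooBusyException entries above show the region refusing writes once its memstore passes the blocking threshold (derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 512.0 K limit here suggests the test deliberately configures a very small flush size), while the client's RpcRetryingCallerImpl keeps retrying ("tries=7, retries=16"). The following is an illustrative client-side sketch, not part of the captured log: connection details, retry settings and the write handling are assumptions, only the table/row/column names mirror the log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry budget used by RpcRetryingCallerImpl when the server
    // answers with a retryable exception such as RegionTooBusyException
    // (16 matches the "retries=16" seen in the log; pause is in milliseconds).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retried internally; each failed attempt shows up server-side as the
        // "Region is too busy" WARN entries captured above.
        table.put(put);
      } catch (IOException e) {
        // Once the retry budget or deadline is spent the failure surfaces here
        // (typically wrapping the remote RegionTooBusyException); back off
        // instead of hammering the region server.
        System.err.println("Write gave up while region was busy: " + e.getMessage());
      }
    }
  }
}
```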
2024-11-23T03:22:03,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:03,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:03,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T03:22:03,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T03:22:03,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:03,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:03,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:03,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:03,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:03,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
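pid=78 here is a FlushRegionProcedure dispatched by the master; it keeps failing with "Unable to complete flush" because the region is already flushing, and the master simply re-dispatches it until it succeeds. Its parent pid=77 is a table-level FlushTableProcedure on TestAcidGuarantees, and the recurring "Checking to see if procedure is done pid=77" lines are the caller polling for completion (both procedures finish further down in the log). For context, a hedged sketch of how such a flush is typically requested from the client side; Admin.flush(TableName) is the real API, everything else about the setup is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of every region of the table; in this build the master
                // runs it as a FlushTableProcedure with per-region subprocedures, as seen
                // in the pid=77 / pid=78 entries of this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }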
2024-11-23T03:22:03,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8f14bc556633431db5982c2b08351e56 2024-11-23T03:22:03,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/2ecef38eb9e74b59a06a112923f334e5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5 2024-11-23T03:22:03,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5, entries=200, sequenceid=262, filesize=14.3 K 2024-11-23T03:22:03,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/46b98dbddd6941b8a96a473086a49762 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762 2024-11-23T03:22:03,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T03:22:03,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/8f14bc556633431db5982c2b08351e56 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56 2024-11-23T03:22:03,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T03:22:03,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 2aadefa362142abf479e6e4ecc26db15 in 1039ms, sequenceid=262, compaction requested=true 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:03,387 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:03,387 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:03,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:03,389 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49228 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:03,389 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:03,389 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:03,389 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7451f7ef75144fc2ad1c3af950a6bc43, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=48.1 K 2024-11-23T03:22:03,390 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46788 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:03,390 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7451f7ef75144fc2ad1c3af950a6bc43, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119019 2024-11-23T03:22:03,390 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:03,390 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
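The SortedCompactionPolicy / ExploringCompactionPolicy lines above show minor-compaction selection: from the eligible store files the policy explores contiguous windows and keeps the one whose files satisfy the size-ratio constraint (here all 4 files, 49228 bytes total, after 3 permutations). Below is a deliberately simplified, self-contained sketch of that ratio idea only; it is not the actual ExploringCompactionPolicy, and the ratio, minimum window size, and file sizes are illustrative.

    import java.util.ArrayList;
    import java.util.List;

    /** Toy size-ratio selection, loosely modelled on the exploring compaction policy. */
    public class RatioSelectionSketch {

        /** Returns the largest contiguous window of files that is "in ratio". */
        static List<Long> select(List<Long> fileSizes, double ratio) {
            List<Long> best = new ArrayList<>();
            for (int start = 0; start < fileSizes.size(); start++) {
                for (int end = start + 2; end <= fileSizes.size(); end++) {   // toy minimum of 2 files
                    List<Long> window = fileSizes.subList(start, end);
                    if (inRatio(window, ratio) && window.size() > best.size()) {
                        best = new ArrayList<>(window);
                    }
                }
            }
            return best;
        }

        /** Every file must be no larger than ratio * (sum of the other files in the window). */
        static boolean inRatio(List<Long> window, double ratio) {
            long total = window.stream().mapToLong(Long::longValue).sum();
            for (long size : window) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes roughly echoing the log's 12.3 K, 9.5 K, 11.9 K and 14.3 K store files.
            List<Long> sizes = List.of(12595L, 9728L, 12185L, 14643L);
            System.out.println("selected " + select(sizes, 1.2).size() + " files");
        }
    }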
2024-11-23T03:22:03,390 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/3895135bbeb5435bb85ec84a84e61a37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=45.7 K 2024-11-23T03:22:03,391 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 332a69392ce949de861b8b57101a93be, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732332119981 2024-11-23T03:22:03,391 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3895135bbeb5435bb85ec84a84e61a37, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119019 2024-11-23T03:22:03,392 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 440cffca44614b1e930eaea478d4487f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732332119981 2024-11-23T03:22:03,392 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9a92a86b15b4bcfbfb6c4c26884800b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332120061 2024-11-23T03:22:03,393 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ecef38eb9e74b59a06a112923f334e5, keycount=200, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732332121211 2024-11-23T03:22:03,393 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c795ef44bf845ffbf912ae3dc04e350, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332120061 2024-11-23T03:22:03,393 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 46b98dbddd6941b8a96a473086a49762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732332121211 2024-11-23T03:22:03,408 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#252 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:03,409 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9376ac32ea204f558005122ae5a94134 is 50, key is test_row_0/A:col10/1732332121222/Put/seqid=0 2024-11-23T03:22:03,422 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#253 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:03,423 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/66106baa9136475f9c31786f8db2c666 is 50, key is test_row_0/B:col10/1732332121222/Put/seqid=0 2024-11-23T03:22:03,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742124_1300 (size=12865) 2024-11-23T03:22:03,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742125_1301 (size=12865) 2024-11-23T03:22:03,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T03:22:03,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
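The PressureAwareThroughputController lines above cap compaction write bandwidth; in this run the total limit is 50.00 MB/second and no compactor had to sleep. The following is a minimal, self-contained sketch of that style of throttling, writing in chunks and sleeping whenever observed throughput runs ahead of the limit. It is not the HBase class itself, and the chunk size and limit are illustrative.

    /** Toy write throttle in the spirit of a pressure-aware throughput controller. */
    public class ThroughputThrottleSketch {
        private final double limitBytesPerSec;
        private long bytesWritten;
        private final long startNanos = System.nanoTime();

        ThroughputThrottleSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        /** Call after writing {@code bytes}; sleeps if we are ahead of the allowed rate. */
        void control(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double earliestAllowedSec = bytesWritten / limitBytesPerSec;
            if (earliestAllowedSec > elapsedSec) {
                Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50L * 1024 * 1024); // 50 MB/s
            for (int i = 0; i < 100; i++) {
                throttle.control(1024 * 1024);   // pretend we wrote a 1 MB compaction chunk
            }
            System.out.println("done");
        }
    }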
2024-11-23T03:22:03,444 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:03,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/27c5e7286f2c4026b75330f32facba2f is 50, key is test_row_0/A:col10/1732332122422/Put/seqid=0 2024-11-23T03:22:03,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742126_1302 (size=12301) 2024-11-23T03:22:03,518 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/27c5e7286f2c4026b75330f32facba2f 2024-11-23T03:22:03,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/66b5ebe6da394ec6a79f395d3d60b99b is 50, key is test_row_0/B:col10/1732332122422/Put/seqid=0 2024-11-23T03:22:03,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
as already flushing 2024-11-23T03:22:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:03,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T03:22:03,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742127_1303 (size=12301) 2024-11-23T03:22:03,590 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/66b5ebe6da394ec6a79f395d3d60b99b 2024-11-23T03:22:03,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/e8cb54c24c6b4126b3292cb2e827aab5 is 50, key is test_row_0/C:col10/1732332122422/Put/seqid=0 2024-11-23T03:22:03,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332183611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332183614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742128_1304 (size=12301) 2024-11-23T03:22:03,637 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/e8cb54c24c6b4126b3292cb2e827aab5 2024-11-23T03:22:03,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/27c5e7286f2c4026b75330f32facba2f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f 2024-11-23T03:22:03,653 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T03:22:03,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/66b5ebe6da394ec6a79f395d3d60b99b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b 2024-11-23T03:22:03,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T03:22:03,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/e8cb54c24c6b4126b3292cb2e827aab5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5 2024-11-23T03:22:03,669 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T03:22:03,670 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 2aadefa362142abf479e6e4ecc26db15 in 226ms, sequenceid=285, compaction requested=true 2024-11-23T03:22:03,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
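Each flush above writes its output under the region's .tmp directory and only afterwards "commits" it by renaming the file into the store directory (A, B or C), so readers never see a partially written HFile; the HRegionFileSystem "Committing .tmp/... as ..." lines are that rename. Below is a small sketch of the same write-to-temp-then-rename pattern on a local filesystem using plain java.nio; the paths and content are invented for illustration and this is not the HRegionFileSystem code (on HDFS the commit is likewise a rename, this only illustrates the ordering).

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Path.of("region", "A");
            Path tmpDir = Path.of("region", ".tmp", "A");
            Files.createDirectories(storeDir);
            Files.createDirectories(tmpDir);

            // 1. Write the new file fully under .tmp, invisible to readers of the store dir.
            Path tmpFile = tmpDir.resolve("2ecef38eb9e74b59a06a112923f334e5");
            Files.write(tmpFile, "flushed cells would go here".getBytes(StandardCharsets.UTF_8));

            // 2. Commit: move it into the store directory in one rename.
            Path committed = storeDir.resolve(tmpFile.getFileName());
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

            System.out.println("committed " + committed);
        }
    }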
2024-11-23T03:22:03,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-23T03:22:03,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-23T03:22:03,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-23T03:22:03,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 689 msec 2024-11-23T03:22:03,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 706 msec 2024-11-23T03:22:03,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:03,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:22:03,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:03,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/84aa7604fa3843618e67b20bb9682712 is 50, key is test_row_0/A:col10/1732332123724/Put/seqid=0 2024-11-23T03:22:03,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742129_1305 (size=14741) 2024-11-23T03:22:03,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/84aa7604fa3843618e67b20bb9682712 2024-11-23T03:22:03,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332183803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332183809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/01e7250c4f2b4e69910b99f38d716d0f is 50, key is test_row_0/B:col10/1732332123724/Put/seqid=0 2024-11-23T03:22:03,851 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/66106baa9136475f9c31786f8db2c666 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66106baa9136475f9c31786f8db2c666 2024-11-23T03:22:03,851 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/9376ac32ea204f558005122ae5a94134 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9376ac32ea204f558005122ae5a94134 2024-11-23T03:22:03,859 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 66106baa9136475f9c31786f8db2c666(size=12.6 K), total size for store is 24.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:03,859 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,859 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=12, startTime=1732332123387; duration=0sec 2024-11-23T03:22:03,859 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:03,859 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:03,860 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T03:22:03,861 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 59089 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T03:22:03,861 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:03,862 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:03,862 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/cd75c8b2740e409b9d67bbee27d5ee8e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=57.7 K 2024-11-23T03:22:03,862 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cd75c8b2740e409b9d67bbee27d5ee8e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732332119019 2024-11-23T03:22:03,863 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c03ca5d182684fe0a23e47819f89b1bb, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732332119981 2024-11-23T03:22:03,863 DEBUG 
[RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b1e5eee038574d11ae992193c960732a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332120061 2024-11-23T03:22:03,865 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f14bc556633431db5982c2b08351e56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732332121211 2024-11-23T03:22:03,865 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 9376ac32ea204f558005122ae5a94134(size=12.6 K), total size for store is 24.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:03,865 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,865 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=12, startTime=1732332123387; duration=0sec 2024-11-23T03:22:03,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:03,866 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:03,866 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e8cb54c24c6b4126b3292cb2e827aab5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732332122414 2024-11-23T03:22:03,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742130_1306 (size=12301) 2024-11-23T03:22:03,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/01e7250c4f2b4e69910b99f38d716d0f 2024-11-23T03:22:03,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/dca9e57a2b9e499894eaaa8ae483009a is 50, key is test_row_0/C:col10/1732332123724/Put/seqid=0 2024-11-23T03:22:03,904 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#260 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:03,905 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1c759218964a4baa8f527cc684df263a is 50, key is test_row_0/C:col10/1732332122422/Put/seqid=0 2024-11-23T03:22:03,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332183911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332183917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:03,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742131_1307 (size=12301) 2024-11-23T03:22:03,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/dca9e57a2b9e499894eaaa8ae483009a 2024-11-23T03:22:03,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/84aa7604fa3843618e67b20bb9682712 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712 2024-11-23T03:22:03,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712, entries=200, sequenceid=300, filesize=14.4 K 2024-11-23T03:22:03,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/01e7250c4f2b4e69910b99f38d716d0f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f 2024-11-23T03:22:03,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f, entries=150, sequenceid=300, filesize=12.0 K 2024-11-23T03:22:03,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/dca9e57a2b9e499894eaaa8ae483009a as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a 2024-11-23T03:22:03,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a, entries=150, sequenceid=300, filesize=12.0 K 2024-11-23T03:22:03,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 2aadefa362142abf479e6e4ecc26db15 in 228ms, sequenceid=300, compaction requested=true 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:03,953 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:22:03,955 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39907 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:03,955 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:03,955 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:03,955 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9376ac32ea204f558005122ae5a94134, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=39.0 K 2024-11-23T03:22:03,955 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9376ac32ea204f558005122ae5a94134, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732332121211 2024-11-23T03:22:03,956 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27c5e7286f2c4026b75330f32facba2f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732332122414 2024-11-23T03:22:03,956 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84aa7604fa3843618e67b20bb9682712, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732332123593 2024-11-23T03:22:03,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742132_1308 (size=12949) 2024-11-23T03:22:03,975 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#261 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:03,975 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/21829ce55aba493db03e59df2c5d4202 is 50, key is test_row_0/A:col10/1732332123724/Put/seqid=0 2024-11-23T03:22:03,982 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1c759218964a4baa8f527cc684df263a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1c759218964a4baa8f527cc684df263a 2024-11-23T03:22:03,989 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 1c759218964a4baa8f527cc684df263a(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:03,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:03,989 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=11, startTime=1732332123387; duration=0sec 2024-11-23T03:22:03,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:22:03,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:03,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:03,989 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:03,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:03,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:03,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:03,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66106baa9136475f9c31786f8db2c666, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.6 K 2024-11-23T03:22:03,991 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 66106baa9136475f9c31786f8db2c666, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732332121211 2024-11-23T03:22:03,992 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 66b5ebe6da394ec6a79f395d3d60b99b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732332122414 2024-11-23T03:22:03,993 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 01e7250c4f2b4e69910b99f38d716d0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732332123602 2024-11-23T03:22:04,036 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#262 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:04,037 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/2bdb3410eeae4eb9a06a65da89d8bdba is 50, key is test_row_0/B:col10/1732332123724/Put/seqid=0 2024-11-23T03:22:04,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742133_1309 (size=13017) 2024-11-23T03:22:04,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742134_1310 (size=13017) 2024-11-23T03:22:04,074 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/2bdb3410eeae4eb9a06a65da89d8bdba as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2bdb3410eeae4eb9a06a65da89d8bdba 2024-11-23T03:22:04,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T03:22:04,080 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-23T03:22:04,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:04,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-23T03:22:04,085 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:04,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:04,085 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:04,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:04,088 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 2bdb3410eeae4eb9a06a65da89d8bdba(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:04,088 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:04,088 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332123953; duration=0sec 2024-11-23T03:22:04,088 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:04,088 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:04,088 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:22:04,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:22:04,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:22:04,090 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. because compaction request was cancelled 2024-11-23T03:22:04,090 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:04,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:04,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/7d61c7021b4c4e73a5762304fa288d2f is 50, key is 
test_row_0/A:col10/1732332124121/Put/seqid=0 2024-11-23T03:22:04,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332184167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332184171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742135_1311 (size=14741) 2024-11-23T03:22:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:04,238 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:04,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:04,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332184275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332184278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:04,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:04,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:04,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:04,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,445 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/21829ce55aba493db03e59df2c5d4202 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/21829ce55aba493db03e59df2c5d4202 2024-11-23T03:22:04,450 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 21829ce55aba493db03e59df2c5d4202(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:04,450 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:04,450 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332123953; duration=0sec 2024-11-23T03:22:04,451 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:04,451 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:04,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332184480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332184484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:04,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:04,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:04,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:04,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/7d61c7021b4c4e73a5762304fa288d2f 2024-11-23T03:22:04,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4516b8d002434343b018ea7994192aec is 50, key is test_row_0/B:col10/1732332124121/Put/seqid=0 2024-11-23T03:22:04,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742136_1312 (size=12301) 2024-11-23T03:22:04,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4516b8d002434343b018ea7994192aec 2024-11-23T03:22:04,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ec514da191b2444096313ea02467e44e is 50, key is test_row_0/C:col10/1732332124121/Put/seqid=0 2024-11-23T03:22:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:04,700 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:04,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:04,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742137_1313 (size=12301) 2024-11-23T03:22:04,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ec514da191b2444096313ea02467e44e 2024-11-23T03:22:04,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/7d61c7021b4c4e73a5762304fa288d2f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f 2024-11-23T03:22:04,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f, entries=200, sequenceid=326, filesize=14.4 K 2024-11-23T03:22:04,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4516b8d002434343b018ea7994192aec as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec 2024-11-23T03:22:04,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec, entries=150, sequenceid=326, filesize=12.0 K 2024-11-23T03:22:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ec514da191b2444096313ea02467e44e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e 2024-11-23T03:22:04,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e, entries=150, sequenceid=326, filesize=12.0 K 2024-11-23T03:22:04,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 2aadefa362142abf479e6e4ecc26db15 in 637ms, sequenceid=326, compaction requested=true 2024-11-23T03:22:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:04,760 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:22:04,761 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:22:04,761 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:22:04,761 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
because compaction request was cancelled 2024-11-23T03:22:04,761 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:04,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:04,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:04,762 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:22:04,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:04,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:04,762 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:04,762 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:22:04,763 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:22:04,763 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. because compaction request was cancelled 2024-11-23T03:22:04,763 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:04,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:04,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:04,763 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:04,763 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:04,763 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:04,764 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1c759218964a4baa8f527cc684df263a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.7 K 2024-11-23T03:22:04,764 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c759218964a4baa8f527cc684df263a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732332122414 2024-11-23T03:22:04,765 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting dca9e57a2b9e499894eaaa8ae483009a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732332123602 2024-11-23T03:22:04,765 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ec514da191b2444096313ea02467e44e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332123801 2024-11-23T03:22:04,786 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#266 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:04,786 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1ce96f7bfeb74b8fa92d8961768c4b54 is 50, key is test_row_0/C:col10/1732332124121/Put/seqid=0 2024-11-23T03:22:04,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:04,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:04,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:04,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e17a18a2ccda4da98f920c8c19aeea2e is 50, key is test_row_0/A:col10/1732332124792/Put/seqid=0 2024-11-23T03:22:04,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742138_1314 (size=13051) 2024-11-23T03:22:04,854 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:04,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:04,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:04,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:04,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:04,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742139_1315 (size=14741) 2024-11-23T03:22:04,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332184860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332184861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332184965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:04,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:04,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332184966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:05,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:05,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332185172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:05,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332185172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:05,245 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1ce96f7bfeb74b8fa92d8961768c4b54 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ce96f7bfeb74b8fa92d8961768c4b54 2024-11-23T03:22:05,251 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 1ce96f7bfeb74b8fa92d8961768c4b54(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:05,251 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:05,251 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=13, startTime=1732332124762; duration=0sec 2024-11-23T03:22:05,251 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:05,251 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:05,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e17a18a2ccda4da98f920c8c19aeea2e 2024-11-23T03:22:05,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/151d37cea748443285d50fad10b38ade is 50, key is test_row_0/B:col10/1732332124792/Put/seqid=0 2024-11-23T03:22:05,315 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742140_1316 (size=12301) 2024-11-23T03:22:05,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332185476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:05,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332185479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,624 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:05,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/151d37cea748443285d50fad10b38ade 2024-11-23T03:22:05,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/562eade890c04336babf920daeb32b75 is 50, key is test_row_0/C:col10/1732332124792/Put/seqid=0 2024-11-23T03:22:05,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
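The pid=80 retries above come from the master's FlushTableProcedure re-dispatching a FlushRegionCallable while the region is still mid-flush ("NOT flushing ... as already flushing"); the master keeps re-sending the remote procedure until the in-flight flush drains. From a caller's point of view the same flush is just requested once through the Admin API and the retry loop stays server-side. A minimal sketch of issuing such a flush is below; the ZooKeeper quorum is a placeholder and not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");   // placeholder, not from this log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master drives the
      // FlushTableProcedure/FlushRegionProcedure pair seen in the entries above,
      // including the re-dispatch of regions that are already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}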
2024-11-23T03:22:05,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742141_1317 (size=12301) 2024-11-23T03:22:05,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/562eade890c04336babf920daeb32b75 2024-11-23T03:22:05,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e17a18a2ccda4da98f920c8c19aeea2e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e 2024-11-23T03:22:05,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e, entries=200, sequenceid=342, filesize=14.4 K 2024-11-23T03:22:05,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/151d37cea748443285d50fad10b38ade as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade 2024-11-23T03:22:05,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade, entries=150, sequenceid=342, filesize=12.0 K 2024-11-23T03:22:05,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/562eade890c04336babf920daeb32b75 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75 2024-11-23T03:22:05,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75, entries=150, sequenceid=342, filesize=12.0 K 2024-11-23T03:22:05,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 2aadefa362142abf479e6e4ecc26db15 in 1045ms, sequenceid=342, compaction requested=true 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under 
compaction store size is 1 2024-11-23T03:22:05,852 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:05,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:05,852 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:05,854 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:05,854 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:05,854 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:05,855 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:05,855 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:05,855 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
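The "Selecting compaction from 3 store files ... 3 eligible, 16 blocking" lines show ExploringCompactionPolicy kicking off a minor compaction as soon as a store reaches the minimum file count, with 16 being the store-file count at which writes start blocking. A hedged sketch of the configuration knobs behind that selection follows; the values shown are the usual defaults matching the numbers in the log, not settings read from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count per store at which further flushes are blocked
    // (the "16 blocking" reported by SortedCompactionPolicy above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}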
2024-11-23T03:22:05,855 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2bdb3410eeae4eb9a06a65da89d8bdba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.7 K 2024-11-23T03:22:05,855 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/21829ce55aba493db03e59df2c5d4202, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=41.5 K 2024-11-23T03:22:05,855 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bdb3410eeae4eb9a06a65da89d8bdba, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732332123602 2024-11-23T03:22:05,855 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21829ce55aba493db03e59df2c5d4202, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732332123602 2024-11-23T03:22:05,856 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4516b8d002434343b018ea7994192aec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332123801 2024-11-23T03:22:05,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d61c7021b4c4e73a5762304fa288d2f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332123789 2024-11-23T03:22:05,856 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 151d37cea748443285d50fad10b38ade, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732332124146 2024-11-23T03:22:05,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e17a18a2ccda4da98f920c8c19aeea2e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732332124146 2024-11-23T03:22:05,885 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:05,886 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/5567517b9e1f49b2b988297b58702da8 is 50, key is test_row_0/B:col10/1732332124792/Put/seqid=0 2024-11-23T03:22:05,895 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:05,896 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b2bd2f8e45f9471b8866e67bc639a43f is 50, key is test_row_0/A:col10/1732332124792/Put/seqid=0 2024-11-23T03:22:05,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:05,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T03:22:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:05,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T03:22:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:05,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742142_1318 (size=13119) 2024-11-23T03:22:05,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:05,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:05,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742143_1319 (size=13119) 2024-11-23T03:22:06,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b50945ee43574b6a8deeec9f2d340b0b is 50, key is test_row_0/A:col10/1732332124860/Put/seqid=0 2024-11-23T03:22:06,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742144_1320 (size=12301) 2024-11-23T03:22:06,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332186026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332186030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332186136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332186144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:06,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332186350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332186353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,379 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/5567517b9e1f49b2b988297b58702da8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/5567517b9e1f49b2b988297b58702da8 2024-11-23T03:22:06,385 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 5567517b9e1f49b2b988297b58702da8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
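The RegionTooBusyException WARNs above are memstore backpressure: once the region's pending memstore exceeds the blocking limit (512 K in this test run, presumably the configured flush size times hbase.hregion.memstore.block.multiplier), mutations are rejected until the flush completes, and callers are expected to back off and retry. The standard client already retries these internally; the sketch below just makes the backoff explicit, and the row, column, and retry budget are illustrative rather than taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {   // illustrative retry budget
        try {
          table.put(put);
          break;                                        // accepted once the memstore drains
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                      // back off while the flush runs
          backoffMs *= 2;
        }
      }
    }
  }
}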
2024-11-23T03:22:06,386 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:06,386 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332125852; duration=0sec 2024-11-23T03:22:06,386 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:06,386 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:06,386 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T03:22:06,387 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T03:22:06,387 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T03:22:06,387 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. because compaction request was cancelled 2024-11-23T03:22:06,387 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:06,404 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b2bd2f8e45f9471b8866e67bc639a43f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b2bd2f8e45f9471b8866e67bc639a43f 2024-11-23T03:22:06,411 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into b2bd2f8e45f9471b8866e67bc639a43f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:06,411 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:06,411 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332125852; duration=0sec 2024-11-23T03:22:06,411 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:06,411 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:06,430 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b50945ee43574b6a8deeec9f2d340b0b 2024-11-23T03:22:06,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/682b7b2ec67541c2be7529456e2b067f is 50, key is test_row_0/B:col10/1732332124860/Put/seqid=0 2024-11-23T03:22:06,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742145_1321 (size=12301) 2024-11-23T03:22:06,518 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/682b7b2ec67541c2be7529456e2b067f 2024-11-23T03:22:06,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9fe8614d12b94e91a02d7e094a4fdf40 is 50, key is test_row_0/C:col10/1732332124860/Put/seqid=0 2024-11-23T03:22:06,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742146_1322 (size=12301) 2024-11-23T03:22:06,591 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9fe8614d12b94e91a02d7e094a4fdf40 2024-11-23T03:22:06,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/b50945ee43574b6a8deeec9f2d340b0b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b 2024-11-23T03:22:06,601 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b, entries=150, sequenceid=364, filesize=12.0 K 2024-11-23T03:22:06,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/682b7b2ec67541c2be7529456e2b067f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f 2024-11-23T03:22:06,606 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f, entries=150, sequenceid=364, filesize=12.0 K 2024-11-23T03:22:06,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9fe8614d12b94e91a02d7e094a4fdf40 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40 2024-11-23T03:22:06,611 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40, entries=150, sequenceid=364, filesize=12.0 K 2024-11-23T03:22:06,612 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2aadefa362142abf479e6e4ecc26db15 in 677ms, sequenceid=364, compaction requested=true 2024-11-23T03:22:06,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:06,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:06,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T03:22:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-23T03:22:06,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-23T03:22:06,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5290 sec 2024-11-23T03:22:06,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.5350 sec 2024-11-23T03:22:06,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:06,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:06,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/32d829de4fbe4ddc817cece53d58bbcd is 50, key is test_row_0/A:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:06,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332186733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332186735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742147_1323 (size=12301) 2024-11-23T03:22:06,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/32d829de4fbe4ddc817cece53d58bbcd 2024-11-23T03:22:06,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4f2472e8467348fc8a327522cf62f70f is 50, key is test_row_0/B:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:06,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742148_1324 (size=12301) 2024-11-23T03:22:06,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4f2472e8467348fc8a327522cf62f70f 2024-11-23T03:22:06,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332186839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332186849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:06,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6f83991b87964c1d91b9f881ada20ba0 is 50, key is test_row_0/C:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742149_1325 (size=12301) 2024-11-23T03:22:06,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6f83991b87964c1d91b9f881ada20ba0 2024-11-23T03:22:06,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/32d829de4fbe4ddc817cece53d58bbcd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd 2024-11-23T03:22:06,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T03:22:06,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/4f2472e8467348fc8a327522cf62f70f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f 2024-11-23T03:22:06,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T03:22:06,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6f83991b87964c1d91b9f881ada20ba0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0 2024-11-23T03:22:06,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T03:22:06,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 2aadefa362142abf479e6e4ecc26db15 in 266ms, sequenceid=383, compaction requested=true 2024-11-23T03:22:06,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:06,943 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:06,944 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:06,944 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:06,944 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:06,944 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:06,944 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b2bd2f8e45f9471b8866e67bc639a43f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.8 K 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:06,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:06,945 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2bd2f8e45f9471b8866e67bc639a43f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732332124146 2024-11-23T03:22:06,945 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:06,945 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:06,945 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:06,945 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/5567517b9e1f49b2b988297b58702da8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.8 K 2024-11-23T03:22:06,945 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b50945ee43574b6a8deeec9f2d340b0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732332124824 2024-11-23T03:22:06,946 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5567517b9e1f49b2b988297b58702da8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732332124146 2024-11-23T03:22:06,946 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 682b7b2ec67541c2be7529456e2b067f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732332124824 2024-11-23T03:22:06,946 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32d829de4fbe4ddc817cece53d58bbcd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:06,947 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f2472e8467348fc8a327522cf62f70f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:06,974 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#278 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:06,975 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ae17aa464482487abc70f3dc14f62052 is 50, key is test_row_0/A:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:06,982 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:06,982 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/62381e115b264f9cbe6efdab076bc358 is 50, key is test_row_0/B:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:07,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:07,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742151_1327 (size=13221) 2024-11-23T03:22:07,076 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/62381e115b264f9cbe6efdab076bc358 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/62381e115b264f9cbe6efdab076bc358 2024-11-23T03:22:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742150_1326 (size=13221) 2024-11-23T03:22:07,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/598e088b6eb44867829b2832351672df is 50, key is test_row_0/A:col10/1732332126722/Put/seqid=0 2024-11-23T03:22:07,089 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 62381e115b264f9cbe6efdab076bc358(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:07,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:07,089 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332126944; duration=0sec 2024-11-23T03:22:07,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:07,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:07,089 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:07,092 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:07,092 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:07,092 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:07,093 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ce96f7bfeb74b8fa92d8961768c4b54, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=48.8 K 2024-11-23T03:22:07,093 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ce96f7bfeb74b8fa92d8961768c4b54, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332123801 2024-11-23T03:22:07,093 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 562eade890c04336babf920daeb32b75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732332124146 2024-11-23T03:22:07,094 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fe8614d12b94e91a02d7e094a4fdf40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=364, earliestPutTs=1732332124824 2024-11-23T03:22:07,095 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f83991b87964c1d91b9f881ada20ba0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:07,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332187123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332187125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742152_1328 (size=12301) 2024-11-23T03:22:07,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/598e088b6eb44867829b2832351672df 2024-11-23T03:22:07,172 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:07,173 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/95f4a4679a8e4ecbb29330a36602ac8a is 50, key is test_row_0/C:col10/1732332126028/Put/seqid=0 2024-11-23T03:22:07,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/82dabee4e82a4c6cbe1f707e4cec3201 is 50, key is test_row_0/B:col10/1732332126722/Put/seqid=0 2024-11-23T03:22:07,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332187232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332187236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742154_1330 (size=12301) 2024-11-23T03:22:07,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742153_1329 (size=13187) 2024-11-23T03:22:07,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332187441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332187449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,488 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ae17aa464482487abc70f3dc14f62052 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae17aa464482487abc70f3dc14f62052 2024-11-23T03:22:07,495 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into ae17aa464482487abc70f3dc14f62052(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:07,495 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:07,495 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332126943; duration=0sec 2024-11-23T03:22:07,495 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:07,495 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:07,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/82dabee4e82a4c6cbe1f707e4cec3201 2024-11-23T03:22:07,701 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/95f4a4679a8e4ecbb29330a36602ac8a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/95f4a4679a8e4ecbb29330a36602ac8a 2024-11-23T03:22:07,716 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 95f4a4679a8e4ecbb29330a36602ac8a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:07,716 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:07,716 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=12, startTime=1732332126944; duration=0sec 2024-11-23T03:22:07,716 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:07,716 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:07,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/50b1fedac86c499f9376da640c2ead34 is 50, key is test_row_0/C:col10/1732332126722/Put/seqid=0 2024-11-23T03:22:07,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332187754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742155_1331 (size=12301) 2024-11-23T03:22:07,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/50b1fedac86c499f9376da640c2ead34 2024-11-23T03:22:07,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:07,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332187759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:07,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/598e088b6eb44867829b2832351672df as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df 2024-11-23T03:22:07,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df, entries=150, sequenceid=403, filesize=12.0 K 2024-11-23T03:22:07,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/82dabee4e82a4c6cbe1f707e4cec3201 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201 2024-11-23T03:22:07,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201, entries=150, sequenceid=403, filesize=12.0 K 2024-11-23T03:22:07,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/50b1fedac86c499f9376da640c2ead34 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34 2024-11-23T03:22:07,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34, entries=150, sequenceid=403, filesize=12.0 K 2024-11-23T03:22:07,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 
KB/89310 for 2aadefa362142abf479e6e4ecc26db15 in 738ms, sequenceid=403, compaction requested=false 2024-11-23T03:22:07,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T03:22:08,191 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-23T03:22:08,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-23T03:22:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:08,194 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:08,195 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:08,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:08,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:08,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/aa722fdd7dee4a85be62b779b6d8dd3d is 50, key is test_row_0/A:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:08,282 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T03:22:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:08,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742156_1332 (size=12301) 2024-11-23T03:22:08,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332188314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332188314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:08,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332188418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332188418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:08,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332188625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332188626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,657 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
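[editor's note] The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate calls while the region's memstore sits over its 512.0 K blocking limit; each rejection is mirrored by an ipc.CallRunner DEBUG entry for the failed callId. Whether that exception reaches application code depends on the client's retry settings, but a minimal, hypothetical sketch of handling it explicitly (standard HBase client API, reusing the table, row, family and qualifier names visible in this log; not the test's own code) could look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier taken from the log ("key is test_row_0/B:col10/..."); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          return; // write accepted once the memstore has drained below the blocking limit
        } catch (RegionTooBusyException e) {
          // Server is rejecting writes ("Over memstore limit"); back off so flushes can catch up.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new RuntimeException("region stayed too busy after 10 attempts");
    }
  }
}
```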
2024-11-23T03:22:08,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/aa722fdd7dee4a85be62b779b6d8dd3d 2024-11-23T03:22:08,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/89c6f42bf1504496907d559f9a3c26dd is 50, key is test_row_0/B:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:08,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742157_1333 (size=12301) 2024-11-23T03:22:08,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/89c6f42bf1504496907d559f9a3c26dd 2024-11-23T03:22:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:08,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/70d75619ec5846b7b43e5f982bf54677 is 50, key is test_row_0/C:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:08,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:08,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:08,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
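[editor's note] The 512.0 K figure in the warnings is the region's blocking memstore size, i.e. the configured flush size multiplied by the block multiplier; this test evidently runs with a very small flush size so that the writers outrun the flusher seen committing files above. For comparison, a sketch of how those two knobs are set on an HBase Configuration (the values below are illustrative, roughly the stock defaults, not what this test uses):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Flush a store's memstore once it reaches this many bytes (128 MB here;
    // the test producing this log clearly uses a much smaller value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

    // Block new writes with RegionTooBusyException once the region's memstore
    // exceeds flush.size * multiplier -- the "Over memstore limit" seen above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking memstore size = " + blockingLimit + " bytes");
  }
}
```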
2024-11-23T03:22:08,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742158_1334 (size=12301) 2024-11-23T03:22:08,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/70d75619ec5846b7b43e5f982bf54677 2024-11-23T03:22:08,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/aa722fdd7dee4a85be62b779b6d8dd3d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d 2024-11-23T03:22:08,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d, entries=150, sequenceid=423, filesize=12.0 K 2024-11-23T03:22:08,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/89c6f42bf1504496907d559f9a3c26dd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd 2024-11-23T03:22:08,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd, entries=150, sequenceid=423, filesize=12.0 K 2024-11-23T03:22:08,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/70d75619ec5846b7b43e5f982bf54677 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677 2024-11-23T03:22:08,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677, entries=150, sequenceid=423, filesize=12.0 K 2024-11-23T03:22:08,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 2aadefa362142abf479e6e4ecc26db15 in 626ms, sequenceid=423, compaction requested=true 2024-11-23T03:22:08,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:08,892 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:08,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:08,892 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:08,893 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:08,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:08,893 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:08,893 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:08,893 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,893 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,893 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae17aa464482487abc70f3dc14f62052, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.9 K 2024-11-23T03:22:08,893 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/62381e115b264f9cbe6efdab076bc358, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.9 K 2024-11-23T03:22:08,894 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae17aa464482487abc70f3dc14f62052, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:08,894 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 62381e115b264f9cbe6efdab076bc358, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:08,894 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 598e088b6eb44867829b2832351672df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732332126722 2024-11-23T03:22:08,894 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 82dabee4e82a4c6cbe1f707e4cec3201, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732332126722 2024-11-23T03:22:08,894 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa722fdd7dee4a85be62b779b6d8dd3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:08,895 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 89c6f42bf1504496907d559f9a3c26dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:08,918 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:08,919 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e1a47f27bcf2435aa71cb3ef7e9cbece is 50, key is test_row_0/A:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:08,933 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:08,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:08,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:08,934 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/b421760bfc4e447cabd9afd3d7c380c8 is 50, key is test_row_0/B:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:08,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/83690458c3294b4bb92ed36edca6b2c3 is 50, key is test_row_0/A:col10/1732332128312/Put/seqid=0 2024-11-23T03:22:08,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:08,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:08,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:08,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:08,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332188978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:08,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332188978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:08,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742159_1335 (size=13323) 2024-11-23T03:22:08,992 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e1a47f27bcf2435aa71cb3ef7e9cbece as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e1a47f27bcf2435aa71cb3ef7e9cbece 2024-11-23T03:22:09,000 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into e1a47f27bcf2435aa71cb3ef7e9cbece(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
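[editor's note] Each flush adds a new HFile per store, so after three flushes the ExploringCompactionPolicy finds 3 eligible files (the usual minimum for a minor compaction) and rewrites them into one, as in the "Completed compaction of 3 (all) file(s)" entry above. That threshold is a configuration knob, and a compaction can also be requested explicitly; a brief, hypothetical sketch using the standard Admin API (table name from the log, everything else illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered
    // (the "3 eligible" in the selection messages above matches this default).
    conf.setInt("hbase.hstore.compactionThreshold", 3);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to compact all stores of the table; the request is
      // queued and executed asynchronously, much like the system-triggered one here.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```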
2024-11-23T03:22:09,000 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:09,000 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332128892; duration=0sec 2024-11-23T03:22:09,000 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:09,000 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:09,000 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:09,002 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:09,002 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:09,002 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:09,002 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/95f4a4679a8e4ecbb29330a36602ac8a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=36.9 K 2024-11-23T03:22:09,002 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95f4a4679a8e4ecbb29330a36602ac8a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732332126022 2024-11-23T03:22:09,003 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50b1fedac86c499f9376da640c2ead34, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732332126722 2024-11-23T03:22:09,003 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70d75619ec5846b7b43e5f982bf54677, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38243 is added to blk_1073742160_1336 (size=13323) 2024-11-23T03:22:09,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742161_1337 (size=14741) 2024-11-23T03:22:09,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/83690458c3294b4bb92ed36edca6b2c3 2024-11-23T03:22:09,036 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#290 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:09,037 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6acf26676b5a48bb977636d569073ffb is 50, key is test_row_0/C:col10/1732332127122/Put/seqid=0 2024-11-23T03:22:09,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/986170c2aaf84aa5b9a7fbb91d564a16 is 50, key is test_row_0/B:col10/1732332128312/Put/seqid=0 2024-11-23T03:22:09,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332189085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332189086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742162_1338 (size=13289) 2024-11-23T03:22:09,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742163_1339 (size=12301) 2024-11-23T03:22:09,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/986170c2aaf84aa5b9a7fbb91d564a16 2024-11-23T03:22:09,119 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:09,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:09,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:09,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:09,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:09,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:09,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:09,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/bd1b51689325400fa59cfe91b0fc290c is 50, key is test_row_0/C:col10/1732332128312/Put/seqid=0 2024-11-23T03:22:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742164_1340 (size=12301) 2024-11-23T03:22:09,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/bd1b51689325400fa59cfe91b0fc290c 2024-11-23T03:22:09,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/83690458c3294b4bb92ed36edca6b2c3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3 2024-11-23T03:22:09,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3, entries=200, sequenceid=443, filesize=14.4 K 2024-11-23T03:22:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/986170c2aaf84aa5b9a7fbb91d564a16 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16 2024-11-23T03:22:09,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16, entries=150, sequenceid=443, filesize=12.0 K 2024-11-23T03:22:09,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/bd1b51689325400fa59cfe91b0fc290c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c 2024-11-23T03:22:09,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c, entries=150, sequenceid=443, filesize=12.0 K 2024-11-23T03:22:09,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2aadefa362142abf479e6e4ecc26db15 in 289ms, sequenceid=443, compaction requested=false 2024-11-23T03:22:09,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:09,274 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
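The RegionTooBusyException entries above come from HRegion.checkResources rejecting new writes while the region's memstore is over its 512.0 K blocking limit, and the remote FlushRegionCallable for pid=82 fails with "Unable to complete flush" because the region reports it is already flushing. For reference, the sketch below shows how a client write could be retried when such a rejection surfaces. It is illustrative only: the class name, row/column values, retry count, and backoff are assumptions, and with default settings (hbase.client.retries.number greater than 1) the HBase client retries internally rather than exposing the raw exception.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Retry the write a few times if the region rejects it because the
          // memstore is above the blocking limit (RegionTooBusyException).
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e; // give up after five attempts (arbitrary for this sketch)
              }
              Thread.sleep(100L * attempt); // simple linear backoff, illustrative only
            }
          }
        }
      }
    }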
2024-11-23T03:22:09,275 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:09,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/fbdbeb2b5145467db7c3355c697b647a is 50, key is test_row_0/A:col10/1732332128974/Put/seqid=0 2024-11-23T03:22:09,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:09,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
as already flushing 2024-11-23T03:22:09,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:09,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742165_1341 (size=12301) 2024-11-23T03:22:09,308 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/fbdbeb2b5145467db7c3355c697b647a 2024-11-23T03:22:09,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/404c9bed86cb468c993a3833d46c8565 is 50, key is test_row_0/B:col10/1732332128974/Put/seqid=0 2024-11-23T03:22:09,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742166_1342 (size=12301) 2024-11-23T03:22:09,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332189332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332189333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,417 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/b421760bfc4e447cabd9afd3d7c380c8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/b421760bfc4e447cabd9afd3d7c380c8 2024-11-23T03:22:09,426 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into b421760bfc4e447cabd9afd3d7c380c8(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
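The long-compaction thread above has just rewritten the three store files of family B of region 2aadefa362142abf479e6e4ecc26db15 into a single 13.0 K file (family C completes the same way a few entries further down). These compactions are triggered automatically after the flushes; the sketch below only illustrates how a compaction can also be requested explicitly through the Admin API. The table and family names are taken from the log; the class name and connection setup are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Request a minor compaction of one column family, then a major
          // compaction of the whole table; both calls only queue the work.
          admin.compact(table, Bytes.toBytes("B"));
          admin.majorCompact(table);
        }
      }
    }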
2024-11-23T03:22:09,426 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:09,426 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332128892; duration=0sec 2024-11-23T03:22:09,426 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:09,426 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:09,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332189436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332189436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,510 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6acf26676b5a48bb977636d569073ffb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6acf26676b5a48bb977636d569073ffb 2024-11-23T03:22:09,526 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 6acf26676b5a48bb977636d569073ffb(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:09,526 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:09,526 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=13, startTime=1732332128892; duration=0sec 2024-11-23T03:22:09,526 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:09,526 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:09,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332189642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332189642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,728 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/404c9bed86cb468c993a3833d46c8565 2024-11-23T03:22:09,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/29ed3dada253428ca0ee15c408eff1a9 is 50, key is test_row_0/C:col10/1732332128974/Put/seqid=0 2024-11-23T03:22:09,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742167_1343 (size=12301) 2024-11-23T03:22:09,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332189949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:09,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332189949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,210 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/29ed3dada253428ca0ee15c408eff1a9 2024-11-23T03:22:10,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/fbdbeb2b5145467db7c3355c697b647a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a 2024-11-23T03:22:10,221 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T03:22:10,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-23T03:22:10,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/404c9bed86cb468c993a3833d46c8565 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565 2024-11-23T03:22:10,235 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T03:22:10,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/29ed3dada253428ca0ee15c408eff1a9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9 2024-11-23T03:22:10,243 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T03:22:10,244 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2aadefa362142abf479e6e4ecc26db15 in 969ms, sequenceid=460, compaction requested=true 2024-11-23T03:22:10,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:10,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:10,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-23T03:22:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-23T03:22:10,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-23T03:22:10,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0510 sec 2024-11-23T03:22:10,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.0550 sec 2024-11-23T03:22:10,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T03:22:10,300 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-23T03:22:10,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:10,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-23T03:22:10,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T03:22:10,304 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:10,305 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:10,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T03:22:10,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T03:22:10,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
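At this point the full flush lifecycle is visible in the log: the jenkins client asks the master to flush TestAcidGuarantees (procId 81, then 83), the master spawns a FlushRegionProcedure per region (pid 82, then 84), the region server flushes each memstore to a temporary file under .tmp, commits it into the A/B/C family directories, and reports the procedure done. A flush like this can be requested from client code with the Admin API; the sketch below is a minimal, assumed setup (class name and connection handling are not taken from the test).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Triggers a table flush; in this log the client side (HBaseAdmin$TableFuture)
          // polls the master until the flush procedure reports completion.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }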
2024-11-23T03:22:10,458 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:10,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:10,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ff231ffb70a8447ca194c633c370cb17 is 50, key is test_row_0/A:col10/1732332130456/Put/seqid=0 2024-11-23T03:22:10,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742168_1344 (size=14741) 2024-11-23T03:22:10,505 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ff231ffb70a8447ca194c633c370cb17 2024-11-23T03:22:10,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332190504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332190505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/8cc981fede2b4e079800fca6ee2a93ac is 50, key is test_row_0/B:col10/1732332130456/Put/seqid=0 2024-11-23T03:22:10,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742169_1345 (size=12301) 2024-11-23T03:22:10,560 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/8cc981fede2b4e079800fca6ee2a93ac 2024-11-23T03:22:10,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c07012883bb94a42a5b250057634ef1a is 50, key is test_row_0/C:col10/1732332130456/Put/seqid=0 2024-11-23T03:22:10,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T03:22:10,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332190612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332190613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742170_1346 (size=12301) 2024-11-23T03:22:10,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332190818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332190820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T03:22:11,027 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c07012883bb94a42a5b250057634ef1a 2024-11-23T03:22:11,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/ff231ffb70a8447ca194c633c370cb17 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17 2024-11-23T03:22:11,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17, entries=200, sequenceid=483, filesize=14.4 K 2024-11-23T03:22:11,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/8cc981fede2b4e079800fca6ee2a93ac as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac 2024-11-23T03:22:11,045 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac, entries=150, sequenceid=483, filesize=12.0 K 2024-11-23T03:22:11,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/c07012883bb94a42a5b250057634ef1a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a 2024-11-23T03:22:11,056 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a, entries=150, sequenceid=483, filesize=12.0 K 2024-11-23T03:22:11,057 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2aadefa362142abf479e6e4ecc26db15 in 599ms, sequenceid=483, compaction requested=true 2024-11-23T03:22:11,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:11,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:11,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-23T03:22:11,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-23T03:22:11,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-23T03:22:11,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 753 msec 2024-11-23T03:22:11,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 758 msec 2024-11-23T03:22:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:11,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/6cd70fef4b794d449ab3b203a72e3148 is 50, key is test_row_0/A:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742171_1347 (size=12301) 2024-11-23T03:22:11,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/6cd70fef4b794d449ab3b203a72e3148 2024-11-23T03:22:11,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332191181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 312 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332191185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/434362a2bf574a86b55f788000ba333c is 50, key is test_row_0/B:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742172_1348 (size=12301) 2024-11-23T03:22:11,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/434362a2bf574a86b55f788000ba333c 2024-11-23T03:22:11,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/696e856e73d14c56b6f38f71f6874457 is 50, key is test_row_0/C:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742173_1349 (size=12301) 2024-11-23T03:22:11,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332191288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 314 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332191289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T03:22:11,408 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-23T03:22:11,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-23T03:22:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T03:22:11,413 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:11,414 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:11,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:11,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332191492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332191492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T03:22:11,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-23T03:22:11,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:11,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:11,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:11,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:11,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:11,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/696e856e73d14c56b6f38f71f6874457 2024-11-23T03:22:11,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/6cd70fef4b794d449ab3b203a72e3148 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148 2024-11-23T03:22:11,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T03:22:11,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/434362a2bf574a86b55f788000ba333c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c 2024-11-23T03:22:11,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T03:22:11,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/696e856e73d14c56b6f38f71f6874457 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457 2024-11-23T03:22:11,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T03:22:11,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 2aadefa362142abf479e6e4ecc26db15 in 577ms, sequenceid=499, compaction requested=true 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:11,705 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T03:22:11,705 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:11,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:11,707 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62527 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T03:22:11,707 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:11,707 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:11,707 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/b421760bfc4e447cabd9afd3d7c380c8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=61.1 K 2024-11-23T03:22:11,709 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b421760bfc4e447cabd9afd3d7c380c8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:11,709 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 67407 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T03:22:11,709 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:11,709 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:11,710 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e1a47f27bcf2435aa71cb3ef7e9cbece, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=65.8 K 2024-11-23T03:22:11,710 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 986170c2aaf84aa5b9a7fbb91d564a16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732332128309 2024-11-23T03:22:11,710 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1a47f27bcf2435aa71cb3ef7e9cbece, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:11,710 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 404c9bed86cb468c993a3833d46c8565, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732332128956 2024-11-23T03:22:11,711 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83690458c3294b4bb92ed36edca6b2c3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732332128309 2024-11-23T03:22:11,711 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cc981fede2b4e079800fca6ee2a93ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732332129330 2024-11-23T03:22:11,711 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbdbeb2b5145467db7c3355c697b647a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732332128956 2024-11-23T03:22:11,711 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 434362a2bf574a86b55f788000ba333c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:11,711 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff231ffb70a8447ca194c633c370cb17, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732332129313 2024-11-23T03:22:11,712 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cd70fef4b794d449ab3b203a72e3148, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:11,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T03:22:11,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:11,730 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:11,736 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#302 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:11,737 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/2333cc3e041e47c0be9c5a8a286e70d8 is 50, key is test_row_0/B:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,747 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:11,748 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/4e858ef81ea44b0a8f9588289553a605 is 50, key is test_row_0/A:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e4bf21770a764c2782e68989bc384126 is 50, key is test_row_0/A:col10/1732332131178/Put/seqid=0 2024-11-23T03:22:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:11,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742174_1350 (size=13493) 2024-11-23T03:22:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742176_1352 (size=12301) 2024-11-23T03:22:11,836 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e4bf21770a764c2782e68989bc384126 2024-11-23T03:22:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/f979842a22d745c986ae99d20f5144cd is 50, key is test_row_0/B:col10/1732332131178/Put/seqid=0 2024-11-23T03:22:11,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742175_1351 (size=13493) 2024-11-23T03:22:11,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332191847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 324 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332191852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,865 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/4e858ef81ea44b0a8f9588289553a605 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/4e858ef81ea44b0a8f9588289553a605 2024-11-23T03:22:11,872 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 4e858ef81ea44b0a8f9588289553a605(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:11,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:11,872 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=11, startTime=1732332131705; duration=0sec 2024-11-23T03:22:11,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:11,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:11,872 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T03:22:11,875 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62493 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T03:22:11,875 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:11,875 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:11,875 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6acf26676b5a48bb977636d569073ffb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=61.0 K 2024-11-23T03:22:11,876 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6acf26676b5a48bb977636d569073ffb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732332127122 2024-11-23T03:22:11,876 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd1b51689325400fa59cfe91b0fc290c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=443, earliestPutTs=1732332128309 
2024-11-23T03:22:11,876 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29ed3dada253428ca0ee15c408eff1a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732332128956 2024-11-23T03:22:11,877 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c07012883bb94a42a5b250057634ef1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732332129330 2024-11-23T03:22:11,877 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 696e856e73d14c56b6f38f71f6874457, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:11,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742177_1353 (size=12301) 2024-11-23T03:22:11,890 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/f979842a22d745c986ae99d20f5144cd 2024-11-23T03:22:11,915 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#306 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:11,916 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/aba09aba886042879048c8d04f41dd2d is 50, key is test_row_0/C:col10/1732332130500/Put/seqid=0 2024-11-23T03:22:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b563e23362944a6bbfd395246170bcb9 is 50, key is test_row_0/C:col10/1732332131178/Put/seqid=0 2024-11-23T03:22:11,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 326 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332191967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742178_1354 (size=13459) 2024-11-23T03:22:11,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:11,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332191968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:11,979 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/aba09aba886042879048c8d04f41dd2d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/aba09aba886042879048c8d04f41dd2d 2024-11-23T03:22:11,987 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into aba09aba886042879048c8d04f41dd2d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:11,987 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:11,987 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=11, startTime=1732332131705; duration=0sec 2024-11-23T03:22:11,988 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:11,988 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:11,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742179_1355 (size=12301) 2024-11-23T03:22:11,994 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b563e23362944a6bbfd395246170bcb9 2024-11-23T03:22:12,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/e4bf21770a764c2782e68989bc384126 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126 2024-11-23T03:22:12,006 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126, entries=150, sequenceid=520, filesize=12.0 K 2024-11-23T03:22:12,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/f979842a22d745c986ae99d20f5144cd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd 2024-11-23T03:22:12,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T03:22:12,018 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd, entries=150, sequenceid=520, filesize=12.0 K 
2024-11-23T03:22:12,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/b563e23362944a6bbfd395246170bcb9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9 2024-11-23T03:22:12,024 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9, entries=150, sequenceid=520, filesize=12.0 K 2024-11-23T03:22:12,025 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2aadefa362142abf479e6e4ecc26db15 in 294ms, sequenceid=520, compaction requested=false 2024-11-23T03:22:12,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:12,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-23T03:22:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-23T03:22:12,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-23T03:22:12,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 612 msec 2024-11-23T03:22:12,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 619 msec 2024-11-23T03:22:12,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, 
store=C 2024-11-23T03:22:12,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:12,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:12,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1be278dd925b48f09adf72d416d90a92 is 50, key is test_row_0/A:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:12,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742180_1356 (size=12301) 2024-11-23T03:22:12,233 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/2333cc3e041e47c0be9c5a8a286e70d8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2333cc3e041e47c0be9c5a8a286e70d8 2024-11-23T03:22:12,242 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into 2333cc3e041e47c0be9c5a8a286e70d8(size=13.2 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:12,242 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:12,242 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=11, startTime=1732332131705; duration=0sec 2024-11-23T03:22:12,242 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:12,242 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:12,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 338 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332192247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332192252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 340 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332192353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332192354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T03:22:12,518 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-23T03:22:12,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:12,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-23T03:22:12,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T03:22:12,521 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:12,522 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:12,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:12,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 342 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332192557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332192559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,584 DEBUG [Thread-1145 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:61411 2024-11-23T03:22:12,584 DEBUG [Thread-1145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:12,585 DEBUG [Thread-1143 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:61411 2024-11-23T03:22:12,585 DEBUG [Thread-1143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:12,586 DEBUG [Thread-1151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:61411 2024-11-23T03:22:12,586 DEBUG [Thread-1151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:12,588 DEBUG [Thread-1149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:61411 2024-11-23T03:22:12,589 DEBUG [Thread-1149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:12,596 DEBUG [Thread-1147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:61411 2024-11-23T03:22:12,596 DEBUG [Thread-1147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:12,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T03:22:12,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1be278dd925b48f09adf72d416d90a92 2024-11-23T03:22:12,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/25746b75a6b549aabaa4d5314a5d4a7a is 50, key is test_row_0/B:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:12,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742181_1357 (size=12301) 2024-11-23T03:22:12,674 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=88 2024-11-23T03:22:12,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:12,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T03:22:12,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T03:22:12,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 344 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53748 deadline: 1732332192861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53830 deadline: 1732332192863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53784 deadline: 1732332192870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,871 DEBUG [Thread-1132 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:12,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53744 deadline: 1732332192910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,911 DEBUG [Thread-1136 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18226 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53758 deadline: 1732332192922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,923 DEBUG [Thread-1140 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18238 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:12,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:12,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T03:22:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:12,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:13,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/25746b75a6b549aabaa4d5314a5d4a7a 2024-11-23T03:22:13,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ea8931bb98284f7baeca15015f089a1e is 50, key is test_row_0/C:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:13,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742182_1358 (size=12301) 2024-11-23T03:22:13,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ea8931bb98284f7baeca15015f089a1e 2024-11-23T03:22:13,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/1be278dd925b48f09adf72d416d90a92 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92 2024-11-23T03:22:13,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92, entries=150, sequenceid=538, filesize=12.0 K 2024-11-23T03:22:13,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/25746b75a6b549aabaa4d5314a5d4a7a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a 2024-11-23T03:22:13,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a, entries=150, sequenceid=538, filesize=12.0 K 2024-11-23T03:22:13,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/ea8931bb98284f7baeca15015f089a1e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e 2024-11-23T03:22:13,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e, entries=150, sequenceid=538, filesize=12.0 K 2024-11-23T03:22:13,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2aadefa362142abf479e6e4ecc26db15 in 902ms, sequenceid=538, compaction requested=true 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aadefa362142abf479e6e4ecc26db15:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:13,076 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:13,076 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:13,077 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:13,077 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:13,077 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/B is initiating minor compaction (all files) 2024-11-23T03:22:13,078 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/A is initiating minor compaction (all files) 2024-11-23T03:22:13,078 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/B in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:13,078 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/A in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:13,078 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2333cc3e041e47c0be9c5a8a286e70d8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=37.2 K 2024-11-23T03:22:13,078 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/4e858ef81ea44b0a8f9588289553a605, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=37.2 K 2024-11-23T03:22:13,078 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2333cc3e041e47c0be9c5a8a286e70d8, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:13,078 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e858ef81ea44b0a8f9588289553a605, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:13,078 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f979842a22d745c986ae99d20f5144cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=520, earliestPutTs=1732332131172 2024-11-23T03:22:13,078 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4bf21770a764c2782e68989bc384126, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=520, earliestPutTs=1732332131172 2024-11-23T03:22:13,079 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 25746b75a6b549aabaa4d5314a5d4a7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=538, earliestPutTs=1732332131840 2024-11-23T03:22:13,079 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1be278dd925b48f09adf72d416d90a92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=538, earliestPutTs=1732332131840 
2024-11-23T03:22:13,087 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#B#compaction#311 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:13,087 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/dca6fee492f94220a44c2ac1c7aab13a is 50, key is test_row_0/B:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:13,090 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#A#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:13,090 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/67172fef7878486e998bc4beb650a9b6 is 50, key is test_row_0/A:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:13,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742184_1360 (size=13595) 2024-11-23T03:22:13,105 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/67172fef7878486e998bc4beb650a9b6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/67172fef7878486e998bc4beb650a9b6 2024-11-23T03:22:13,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742183_1359 (size=13595) 2024-11-23T03:22:13,113 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/A of 2aadefa362142abf479e6e4ecc26db15 into 67172fef7878486e998bc4beb650a9b6(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:13,113 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:13,113 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/A, priority=13, startTime=1732332133076; duration=0sec 2024-11-23T03:22:13,113 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:13,113 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:A 2024-11-23T03:22:13,113 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:13,113 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/dca6fee492f94220a44c2ac1c7aab13a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/dca6fee492f94220a44c2ac1c7aab13a 2024-11-23T03:22:13,114 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:13,114 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 2aadefa362142abf479e6e4ecc26db15/C is initiating minor compaction (all files) 2024-11-23T03:22:13,114 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2aadefa362142abf479e6e4ecc26db15/C in TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:13,114 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/aba09aba886042879048c8d04f41dd2d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp, totalSize=37.2 K 2024-11-23T03:22:13,114 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting aba09aba886042879048c8d04f41dd2d, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732332130500 2024-11-23T03:22:13,115 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b563e23362944a6bbfd395246170bcb9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=520, earliestPutTs=1732332131172 2024-11-23T03:22:13,115 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea8931bb98284f7baeca15015f089a1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=538, earliestPutTs=1732332131840 2024-11-23T03:22:13,118 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/B of 2aadefa362142abf479e6e4ecc26db15 into dca6fee492f94220a44c2ac1c7aab13a(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:13,118 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:13,118 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/B, priority=13, startTime=1732332133076; duration=0sec 2024-11-23T03:22:13,118 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:13,118 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:B 2024-11-23T03:22:13,122 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aadefa362142abf479e6e4ecc26db15#C#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:13,122 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1ffbfe6e41814707b65d14fbd28150ed is 50, key is test_row_0/C:col10/1732332131851/Put/seqid=0 2024-11-23T03:22:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T03:22:13,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742185_1361 (size=13561) 2024-11-23T03:22:13,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:13,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T03:22:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:13,135 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:13,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:13,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/87d91edad3df47399bac834f795c28af is 50, key is test_row_0/A:col10/1732332132245/Put/seqid=0 2024-11-23T03:22:13,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742186_1362 (size=12301) 
2024-11-23T03:22:13,143 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=561 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/87d91edad3df47399bac834f795c28af 2024-11-23T03:22:13,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/396191e39207420d8d7ada96d3ade7c3 is 50, key is test_row_0/B:col10/1732332132245/Put/seqid=0 2024-11-23T03:22:13,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742187_1363 (size=12301) 2024-11-23T03:22:13,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:13,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. as already flushing 2024-11-23T03:22:13,367 DEBUG [Thread-1134 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:61411 2024-11-23T03:22:13,367 DEBUG [Thread-1134 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:13,368 DEBUG [Thread-1138 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:61411 2024-11-23T03:22:13,368 DEBUG [Thread-1138 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:13,531 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/1ffbfe6e41814707b65d14fbd28150ed as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ffbfe6e41814707b65d14fbd28150ed 2024-11-23T03:22:13,535 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2aadefa362142abf479e6e4ecc26db15/C of 2aadefa362142abf479e6e4ecc26db15 into 1ffbfe6e41814707b65d14fbd28150ed(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:13,535 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:13,535 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15., storeName=2aadefa362142abf479e6e4ecc26db15/C, priority=13, startTime=1732332133076; duration=0sec 2024-11-23T03:22:13,535 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:13,535 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aadefa362142abf479e6e4ecc26db15:C 2024-11-23T03:22:13,560 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=561 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/396191e39207420d8d7ada96d3ade7c3 2024-11-23T03:22:13,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9c06f27079894517bbdcd3ed2874e71e is 50, key is test_row_0/C:col10/1732332132245/Put/seqid=0 2024-11-23T03:22:13,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742188_1364 (size=12301) 2024-11-23T03:22:13,577 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=561 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9c06f27079894517bbdcd3ed2874e71e 2024-11-23T03:22:13,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/87d91edad3df47399bac834f795c28af as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/87d91edad3df47399bac834f795c28af 2024-11-23T03:22:13,585 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/87d91edad3df47399bac834f795c28af, entries=150, sequenceid=561, filesize=12.0 K 2024-11-23T03:22:13,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/396191e39207420d8d7ada96d3ade7c3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/396191e39207420d8d7ada96d3ade7c3 2024-11-23T03:22:13,590 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/396191e39207420d8d7ada96d3ade7c3, entries=150, sequenceid=561, filesize=12.0 K 2024-11-23T03:22:13,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/9c06f27079894517bbdcd3ed2874e71e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9c06f27079894517bbdcd3ed2874e71e 2024-11-23T03:22:13,596 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9c06f27079894517bbdcd3ed2874e71e, entries=150, sequenceid=561, filesize=12.0 K 2024-11-23T03:22:13,597 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=13.42 KB/13740 for 2aadefa362142abf479e6e4ecc26db15 in 462ms, sequenceid=561, compaction requested=false 2024-11-23T03:22:13,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:13,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:13,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-23T03:22:13,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-23T03:22:13,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-23T03:22:13,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0770 sec 2024-11-23T03:22:13,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0820 sec 2024-11-23T03:22:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T03:22:13,625 INFO [Thread-1142 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-23T03:22:22,911 DEBUG [Thread-1132 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:61411 2024-11-23T03:22:22,911 DEBUG [Thread-1132 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:22,965 DEBUG [Thread-1136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:61411 2024-11-23T03:22:22,965 DEBUG [Thread-1136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:23,008 DEBUG [Thread-1140 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:61411 2024-11-23T03:22:23,008 DEBUG [Thread-1140 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 199
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 184
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3943
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3852
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3783
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3944
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3857
2024-11-23T03:22:23,009 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-23T03:22:23,009 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-23T03:22:23,009 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64dc42d9 to 127.0.0.1:61411
2024-11-23T03:22:23,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T03:22:23,009 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-23T03:22:23,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-23T03:22:23,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-23T03:22:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-11-23T03:22:23,012 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332143012"}]},"ts":"1732332143012"}
2024-11-23T03:22:23,013 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-23T03:22:23,015 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-23T03:22:23,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-23T03:22:23,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, UNASSIGN}]
2024-11-23T03:22:23,017 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, UNASSIGN
2024-11-23T03:22:23,017 INFO [PEWorker-5 {}]
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=2aadefa362142abf479e6e4ecc26db15, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:23,018 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:22:23,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:23,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T03:22:23,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:23,170 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:23,170 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:22:23,170 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 2aadefa362142abf479e6e4ecc26db15, disabling compactions & flushes 2024-11-23T03:22:23,170 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:23,170 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:23,170 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. after waiting 0 ms 2024-11-23T03:22:23,170 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 
2024-11-23T03:22:23,171 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 2aadefa362142abf479e6e4ecc26db15 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=A 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=B 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2aadefa362142abf479e6e4ecc26db15, store=C 2024-11-23T03:22:23,171 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:23,177 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/f11ca46bab6b47eb90400b6a77fffbc4 is 50, key is test_row_0/A:col10/1732332142964/Put/seqid=0 2024-11-23T03:22:23,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742189_1365 (size=9857) 2024-11-23T03:22:23,205 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/f11ca46bab6b47eb90400b6a77fffbc4 2024-11-23T03:22:23,232 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a0997d035e1d47e9ac090266a924967e is 50, key is test_row_0/B:col10/1732332142964/Put/seqid=0 2024-11-23T03:22:23,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742190_1366 (size=9857) 2024-11-23T03:22:23,252 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a0997d035e1d47e9ac090266a924967e 
2024-11-23T03:22:23,284 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6ebe552ea0724c9ea2021121b13fad7d is 50, key is test_row_0/C:col10/1732332142964/Put/seqid=0 2024-11-23T03:22:23,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T03:22:23,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742191_1367 (size=9857) 2024-11-23T03:22:23,323 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6ebe552ea0724c9ea2021121b13fad7d 2024-11-23T03:22:23,328 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/A/f11ca46bab6b47eb90400b6a77fffbc4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/f11ca46bab6b47eb90400b6a77fffbc4 2024-11-23T03:22:23,333 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/f11ca46bab6b47eb90400b6a77fffbc4, entries=100, sequenceid=570, filesize=9.6 K 2024-11-23T03:22:23,334 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/B/a0997d035e1d47e9ac090266a924967e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a0997d035e1d47e9ac090266a924967e 2024-11-23T03:22:23,338 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a0997d035e1d47e9ac090266a924967e, entries=100, sequenceid=570, filesize=9.6 K 2024-11-23T03:22:23,339 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/.tmp/C/6ebe552ea0724c9ea2021121b13fad7d as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6ebe552ea0724c9ea2021121b13fad7d 2024-11-23T03:22:23,343 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6ebe552ea0724c9ea2021121b13fad7d, entries=100, sequenceid=570, filesize=9.6 K 2024-11-23T03:22:23,344 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 2aadefa362142abf479e6e4ecc26db15 in 174ms, sequenceid=570, compaction requested=true 2024-11-23T03:22:23,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/d7b0db9a26244aabbc85d02f5151f67e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9533907c63f54da8a04f2b7c9cbab514, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b09a80ede2144376adc5441f402e2ecb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7451f7ef75144fc2ad1c3af950a6bc43, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9376ac32ea204f558005122ae5a94134, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/21829ce55aba493db03e59df2c5d4202, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b2bd2f8e45f9471b8866e67bc639a43f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae17aa464482487abc70f3dc14f62052, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e1a47f27bcf2435aa71cb3ef7e9cbece, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/4e858ef81ea44b0a8f9588289553a605, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92] to archive 2024-11-23T03:22:23,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:23,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8d5826b42ebd4a289df88aa085767654 2024-11-23T03:22:23,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/5ad8051bf2b144b9be10e24ad0799475 2024-11-23T03:22:23,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a77eebc8e08a4d609a24c2806aa5c281 2024-11-23T03:22:23,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/c441600ea7744637a3340c26d7a41bd5 2024-11-23T03:22:23,365 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/d7b0db9a26244aabbc85d02f5151f67e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/d7b0db9a26244aabbc85d02f5151f67e 2024-11-23T03:22:23,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/0ec0e58778cd4a6b939610f267af07df 2024-11-23T03:22:23,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9533907c63f54da8a04f2b7c9cbab514 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9533907c63f54da8a04f2b7c9cbab514 2024-11-23T03:22:23,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1dae759dd154459fb1c04669a5ef5d2b 2024-11-23T03:22:23,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a7e4370c277e4b6db785e7f3ae58c41b 2024-11-23T03:22:23,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b09a80ede2144376adc5441f402e2ecb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b09a80ede2144376adc5441f402e2ecb 2024-11-23T03:22:23,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/a2201dc0a6ff4f2ab814f26406b8dbc7 2024-11-23T03:22:23,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/8ba414dceb4c4b04aaffea30a32c7935 2024-11-23T03:22:23,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae1e0df6bd2545fb8ff1617bedd8f279 2024-11-23T03:22:23,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9cc8df74069d40ce9df51ec7a7bab6ca 2024-11-23T03:22:23,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7451f7ef75144fc2ad1c3af950a6bc43 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7451f7ef75144fc2ad1c3af950a6bc43 2024-11-23T03:22:23,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/332a69392ce949de861b8b57101a93be 2024-11-23T03:22:23,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b9a92a86b15b4bcfbfb6c4c26884800b 2024-11-23T03:22:23,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/2ecef38eb9e74b59a06a112923f334e5 2024-11-23T03:22:23,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9376ac32ea204f558005122ae5a94134 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/9376ac32ea204f558005122ae5a94134 2024-11-23T03:22:23,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/27c5e7286f2c4026b75330f32facba2f 2024-11-23T03:22:23,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/84aa7604fa3843618e67b20bb9682712 2024-11-23T03:22:23,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/21829ce55aba493db03e59df2c5d4202 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/21829ce55aba493db03e59df2c5d4202 2024-11-23T03:22:23,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/7d61c7021b4c4e73a5762304fa288d2f 2024-11-23T03:22:23,409 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e17a18a2ccda4da98f920c8c19aeea2e 2024-11-23T03:22:23,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b2bd2f8e45f9471b8866e67bc639a43f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b2bd2f8e45f9471b8866e67bc639a43f 2024-11-23T03:22:23,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/b50945ee43574b6a8deeec9f2d340b0b 2024-11-23T03:22:23,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae17aa464482487abc70f3dc14f62052 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ae17aa464482487abc70f3dc14f62052 2024-11-23T03:22:23,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/32d829de4fbe4ddc817cece53d58bbcd 2024-11-23T03:22:23,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/598e088b6eb44867829b2832351672df 2024-11-23T03:22:23,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e1a47f27bcf2435aa71cb3ef7e9cbece to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e1a47f27bcf2435aa71cb3ef7e9cbece 2024-11-23T03:22:23,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/aa722fdd7dee4a85be62b779b6d8dd3d 2024-11-23T03:22:23,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/83690458c3294b4bb92ed36edca6b2c3 2024-11-23T03:22:23,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/fbdbeb2b5145467db7c3355c697b647a 2024-11-23T03:22:23,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/ff231ffb70a8447ca194c633c370cb17 2024-11-23T03:22:23,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/4e858ef81ea44b0a8f9588289553a605 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/4e858ef81ea44b0a8f9588289553a605 2024-11-23T03:22:23,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/6cd70fef4b794d449ab3b203a72e3148 2024-11-23T03:22:23,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/e4bf21770a764c2782e68989bc384126 2024-11-23T03:22:23,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/1be278dd925b48f09adf72d416d90a92 2024-11-23T03:22:23,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/fa4a7fceecff4bbfa1e8f719c4c7c243, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/cda5dcab369b48fa8d7a679cdebe959e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/c94ddfa363bf4c0a9d314978749ba391, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/3895135bbeb5435bb85ec84a84e61a37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66106baa9136475f9c31786f8db2c666, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2bdb3410eeae4eb9a06a65da89d8bdba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/5567517b9e1f49b2b988297b58702da8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/62381e115b264f9cbe6efdab076bc358, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/b421760bfc4e447cabd9afd3d7c380c8, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2333cc3e041e47c0be9c5a8a286e70d8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a] to archive 2024-11-23T03:22:23,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:23,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/930de58f875b41db89b094768dd34285 2024-11-23T03:22:23,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/52af50c2bda1403a9592d47d647a47bf 2024-11-23T03:22:23,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/def4909d56ef43d79eb09c0bfb1aca20 2024-11-23T03:22:23,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/fa4a7fceecff4bbfa1e8f719c4c7c243 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/fa4a7fceecff4bbfa1e8f719c4c7c243 2024-11-23T03:22:23,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/1cad97051bab4b1ba94022955b4ad319 2024-11-23T03:22:23,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/36d2d176978f404e980bc5fffa191907 2024-11-23T03:22:23,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/cda5dcab369b48fa8d7a679cdebe959e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/cda5dcab369b48fa8d7a679cdebe959e 2024-11-23T03:22:23,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9342d5bfdac64a69b8a4d0785775a5e8 2024-11-23T03:22:23,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a8ff111eff984f51b05a42ccf8930563 2024-11-23T03:22:23,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/c94ddfa363bf4c0a9d314978749ba391 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/c94ddfa363bf4c0a9d314978749ba391 2024-11-23T03:22:23,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/956e42e3cc4941c7b2053e5559bf4be0 2024-11-23T03:22:23,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/47e84e481ea444af9258755d8111fa48 2024-11-23T03:22:23,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/af781bb66538496b8ae3d9bc710e0ba5 2024-11-23T03:22:23,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/3895135bbeb5435bb85ec84a84e61a37 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/3895135bbeb5435bb85ec84a84e61a37 2024-11-23T03:22:23,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/91dfc198a11840c0a09458e3f54edc72 2024-11-23T03:22:23,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/440cffca44614b1e930eaea478d4487f 2024-11-23T03:22:23,491 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/9c795ef44bf845ffbf912ae3dc04e350 2024-11-23T03:22:23,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66106baa9136475f9c31786f8db2c666 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66106baa9136475f9c31786f8db2c666 2024-11-23T03:22:23,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/46b98dbddd6941b8a96a473086a49762 2024-11-23T03:22:23,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/66b5ebe6da394ec6a79f395d3d60b99b 2024-11-23T03:22:23,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2bdb3410eeae4eb9a06a65da89d8bdba to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2bdb3410eeae4eb9a06a65da89d8bdba 2024-11-23T03:22:23,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/01e7250c4f2b4e69910b99f38d716d0f 2024-11-23T03:22:23,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4516b8d002434343b018ea7994192aec 2024-11-23T03:22:23,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/5567517b9e1f49b2b988297b58702da8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/5567517b9e1f49b2b988297b58702da8 2024-11-23T03:22:23,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/151d37cea748443285d50fad10b38ade 2024-11-23T03:22:23,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/682b7b2ec67541c2be7529456e2b067f 2024-11-23T03:22:23,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/62381e115b264f9cbe6efdab076bc358 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/62381e115b264f9cbe6efdab076bc358 2024-11-23T03:22:23,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/4f2472e8467348fc8a327522cf62f70f 2024-11-23T03:22:23,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/82dabee4e82a4c6cbe1f707e4cec3201 2024-11-23T03:22:23,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/b421760bfc4e447cabd9afd3d7c380c8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/b421760bfc4e447cabd9afd3d7c380c8 2024-11-23T03:22:23,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/89c6f42bf1504496907d559f9a3c26dd 2024-11-23T03:22:23,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/986170c2aaf84aa5b9a7fbb91d564a16 2024-11-23T03:22:23,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/404c9bed86cb468c993a3833d46c8565 2024-11-23T03:22:23,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/8cc981fede2b4e079800fca6ee2a93ac 2024-11-23T03:22:23,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2333cc3e041e47c0be9c5a8a286e70d8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/2333cc3e041e47c0be9c5a8a286e70d8 2024-11-23T03:22:23,540 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/434362a2bf574a86b55f788000ba333c 2024-11-23T03:22:23,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/f979842a22d745c986ae99d20f5144cd 2024-11-23T03:22:23,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/25746b75a6b549aabaa4d5314a5d4a7a 2024-11-23T03:22:23,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1069d1cc81e04eb39f5fb286186e5859, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/732838f49ff243c2af5ac555ac1fe557, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/3acd7812c4ac416cad64fe357e77d4bb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/cd75c8b2740e409b9d67bbee27d5ee8e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1c759218964a4baa8f527cc684df263a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ce96f7bfeb74b8fa92d8961768c4b54, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/95f4a4679a8e4ecbb29330a36602ac8a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6acf26676b5a48bb977636d569073ffb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/aba09aba886042879048c8d04f41dd2d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e] to archive 2024-11-23T03:22:23,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
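Each HFileArchiver(596) entry above records one compacted store file being moved from the region's data directory into the mirrored location under the cluster's archive/ root: the namespace/table/region/family layout stays the same and only the root segment changes from data/ to archive/. The following is a minimal, hypothetical helper (not part of the test) that lists the contents of one such archived store directory with the plain Hadoop FileSystem API; the NameNode address and paths are the ones shown in the log and would differ on any other cluster.

```java
// Hypothetical helper, not part of the test: lists what one archived store directory
// contains after the HFileArchiver moves logged above. The archive path mirrors the
// data path, only the root segment changes from data/ to archive/. NameNode address
// and paths are copied from the log and would differ on another cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Path archivedStore = new Path(
        "hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417"
            + "/archive/data/default/TestAcidGuarantees"
            + "/2aadefa362142abf479e6e4ecc26db15/C");
    FileSystem fs = archivedStore.getFileSystem(new Configuration());
    // One line per archived HFile, matching the file names in the log entries above.
    for (FileStatus status : fs.listStatus(archivedStore)) {
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}
```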
2024-11-23T03:22:23,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4e1a8ca0eff044fabf2562cd0138c72c 2024-11-23T03:22:23,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/acf3ff1a41ee428587fe501e48609b84 2024-11-23T03:22:23,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ca2e4b9463664ecba139541e57103d6f 2024-11-23T03:22:23,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1069d1cc81e04eb39f5fb286186e5859 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1069d1cc81e04eb39f5fb286186e5859 2024-11-23T03:22:23,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/f1c2ddf85c4c487e83dd6f21b3e201e4 2024-11-23T03:22:23,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/355160e8256342e5b293a9e554045358 2024-11-23T03:22:23,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/732838f49ff243c2af5ac555ac1fe557 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/732838f49ff243c2af5ac555ac1fe557 2024-11-23T03:22:23,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/2d5c011b9fcc4df9b5a22b433b0333cd 2024-11-23T03:22:23,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8cdb0a36052549dbb6ce292e30db4102 2024-11-23T03:22:23,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/3acd7812c4ac416cad64fe357e77d4bb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/3acd7812c4ac416cad64fe357e77d4bb 2024-11-23T03:22:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T03:22:23,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1f5fe6bf788548b2a76b3fe3303bf460 2024-11-23T03:22:23,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b47b9b9a38434a3392c7d85063ab4b7a 2024-11-23T03:22:23,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/4bfb11f037454e5481aa85471a50ea49 2024-11-23T03:22:23,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/cd75c8b2740e409b9d67bbee27d5ee8e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/cd75c8b2740e409b9d67bbee27d5ee8e 2024-11-23T03:22:23,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b6d872e7a8494290be928c898ab45aab 2024-11-23T03:22:23,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c03ca5d182684fe0a23e47819f89b1bb 2024-11-23T03:22:23,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b1e5eee038574d11ae992193c960732a 2024-11-23T03:22:23,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/8f14bc556633431db5982c2b08351e56 2024-11-23T03:22:23,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1c759218964a4baa8f527cc684df263a to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1c759218964a4baa8f527cc684df263a 2024-11-23T03:22:23,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/e8cb54c24c6b4126b3292cb2e827aab5 2024-11-23T03:22:23,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/dca9e57a2b9e499894eaaa8ae483009a 2024-11-23T03:22:23,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ce96f7bfeb74b8fa92d8961768c4b54 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ce96f7bfeb74b8fa92d8961768c4b54 2024-11-23T03:22:23,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ec514da191b2444096313ea02467e44e 2024-11-23T03:22:23,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/562eade890c04336babf920daeb32b75 2024-11-23T03:22:23,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9fe8614d12b94e91a02d7e094a4fdf40 2024-11-23T03:22:23,646 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/95f4a4679a8e4ecbb29330a36602ac8a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/95f4a4679a8e4ecbb29330a36602ac8a 2024-11-23T03:22:23,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6f83991b87964c1d91b9f881ada20ba0 2024-11-23T03:22:23,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/50b1fedac86c499f9376da640c2ead34 2024-11-23T03:22:23,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6acf26676b5a48bb977636d569073ffb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6acf26676b5a48bb977636d569073ffb 2024-11-23T03:22:23,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/70d75619ec5846b7b43e5f982bf54677 2024-11-23T03:22:23,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/bd1b51689325400fa59cfe91b0fc290c 2024-11-23T03:22:23,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/29ed3dada253428ca0ee15c408eff1a9 2024-11-23T03:22:23,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/c07012883bb94a42a5b250057634ef1a 2024-11-23T03:22:23,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/aba09aba886042879048c8d04f41dd2d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/aba09aba886042879048c8d04f41dd2d 2024-11-23T03:22:23,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/696e856e73d14c56b6f38f71f6874457 2024-11-23T03:22:23,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/b563e23362944a6bbfd395246170bcb9 2024-11-23T03:22:23,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/ea8931bb98284f7baeca15015f089a1e 2024-11-23T03:22:23,693 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/recovered.edits/573.seqid, newMaxSeqId=573, maxSeqId=1 2024-11-23T03:22:23,694 INFO 
[RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15. 2024-11-23T03:22:23,694 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 2aadefa362142abf479e6e4ecc26db15: 2024-11-23T03:22:23,695 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:23,696 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=2aadefa362142abf479e6e4ecc26db15, regionState=CLOSED 2024-11-23T03:22:23,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-23T03:22:23,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 2aadefa362142abf479e6e4ecc26db15, server=0d51875c74df,34141,1732332039937 in 679 msec 2024-11-23T03:22:23,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-23T03:22:23,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2aadefa362142abf479e6e4ecc26db15, UNASSIGN in 682 msec 2024-11-23T03:22:23,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T03:22:23,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 685 msec 2024-11-23T03:22:23,702 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332143702"}]},"ts":"1732332143702"} 2024-11-23T03:22:23,703 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:22:23,715 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:22:23,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 706 msec 2024-11-23T03:22:24,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T03:22:24,116 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-23T03:22:24,117 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:22:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,118 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T03:22:24,119 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,122 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:24,124 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/recovered.edits] 2024-11-23T03:22:24,131 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/67172fef7878486e998bc4beb650a9b6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/67172fef7878486e998bc4beb650a9b6 2024-11-23T03:22:24,135 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/87d91edad3df47399bac834f795c28af to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/87d91edad3df47399bac834f795c28af 2024-11-23T03:22:24,136 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/f11ca46bab6b47eb90400b6a77fffbc4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/A/f11ca46bab6b47eb90400b6a77fffbc4 2024-11-23T03:22:24,144 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/396191e39207420d8d7ada96d3ade7c3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/396191e39207420d8d7ada96d3ade7c3 2024-11-23T03:22:24,148 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a0997d035e1d47e9ac090266a924967e to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/a0997d035e1d47e9ac090266a924967e 2024-11-23T03:22:24,152 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/dca6fee492f94220a44c2ac1c7aab13a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/B/dca6fee492f94220a44c2ac1c7aab13a 2024-11-23T03:22:24,155 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ffbfe6e41814707b65d14fbd28150ed to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/1ffbfe6e41814707b65d14fbd28150ed 2024-11-23T03:22:24,156 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6ebe552ea0724c9ea2021121b13fad7d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/6ebe552ea0724c9ea2021121b13fad7d 2024-11-23T03:22:24,159 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9c06f27079894517bbdcd3ed2874e71e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/C/9c06f27079894517bbdcd3ed2874e71e 2024-11-23T03:22:24,169 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/recovered.edits/573.seqid to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15/recovered.edits/573.seqid 2024-11-23T03:22:24,169 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/2aadefa362142abf479e6e4ecc26db15 2024-11-23T03:22:24,170 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:22:24,171 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,178 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:22:24,180 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-23T03:22:24,182 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,182 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:22:24,182 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332144182"}]},"ts":"9223372036854775807"} 2024-11-23T03:22:24,184 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:22:24,184 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2aadefa362142abf479e6e4ecc26db15, NAME => 'TestAcidGuarantees,,1732332110349.2aadefa362142abf479e6e4ecc26db15.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:22:24,184 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T03:22:24,184 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332144184"}]},"ts":"9223372036854775807"} 2024-11-23T03:22:24,186 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:22:24,188 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 71 msec 2024-11-23T03:22:24,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T03:22:24,220 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-23T03:22:24,232 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237 (was 237), OpenFileDescriptor=445 (was 452), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=552 (was 304) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3984 (was 4798) 2024-11-23T03:22:24,244 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=552, ProcessCount=11, AvailableMemoryMB=3983 2024-11-23T03:22:24,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
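The DisableTableProcedure (pid=89) and DeleteTableProcedure (pid=93) completed above are driven from the client side through HBaseAdmin, which the log shows waiting on each procedure's TableFuture ("Operation: DISABLE ... completed", "Operation: DELETE ... completed"). A minimal sketch of that client-side sequence, assuming a standard HBase 2.x Connection; the class name and connection setup here are illustrative, not the test's own code.

```java
// Minimal sketch: disable and then delete the table the way the HBaseAdmin client in
// the log does between test methods. Connection setup is an assumption, not test code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // drives the DisableTableProcedure seen above (pid=89)
        }
        admin.deleteTable(table);    // drives the DeleteTableProcedure (archive + meta cleanup, pid=93)
      }
    }
  }
}
```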
2024-11-23T03:22:24,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:22:24,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,251 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:22:24,252 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:24,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-11-23T03:22:24,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T03:22:24,252 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:22:24,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742192_1368 (size=960) 2024-11-23T03:22:24,279 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:22:24,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742193_1369 (size=53) 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 73db48321d40b906ec9d02fc18076043, disabling compactions & flushes 2024-11-23T03:22:24,302 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. after waiting 0 ms 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,302 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:24,304 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:22:24,304 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332144304"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332144304"}]},"ts":"1732332144304"} 2024-11-23T03:22:24,305 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
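The CREATE request logged above spells out the whole descriptor: three column families A, B and C with VERSIONS => '1', plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A sketch of building an equivalent descriptor with the 2.x builder API follows; it mirrors the logged schema only, is not the test's own table-creation helper, and leaves the remaining family attributes at their defaults.

```java
// Sketch of a descriptor equivalent to the logged CREATE: families A, B, C with
// VERSIONS => '1' and the table attribute hbase.hregion.compacting.memstore.type=BASIC.
// Mirrors the logged schema; not the test's own table-creation code.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute from the log; selects the BASIC compacting memstore.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] { "A", "B", "C" }) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
      }
      admin.createTable(builder.build());
    }
  }
}
```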
2024-11-23T03:22:24,306 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:22:24,307 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332144306"}]},"ts":"1732332144306"} 2024-11-23T03:22:24,308 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:22:24,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, ASSIGN}] 2024-11-23T03:22:24,313 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, ASSIGN 2024-11-23T03:22:24,314 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:22:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T03:22:24,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:24,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:24,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T03:22:24,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:24,622 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
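After the ASSIGN (pid=95) and its OpenRegionProcedure (pid=96) above, the single region is being opened on 0d51875c74df,34141. Purely as an illustration (not something the test does at this point), a client could confirm the resulting placement through RegionLocator:

```java
// Illustrative only: how a client could observe where the single region of
// TestAcidGuarantees landed after the ASSIGN/OpenRegionProcedure recorded above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowRegionLocation {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             connection.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      // Empty row key hits the single region (STARTKEY => '', ENDKEY => ''); reload=true
      // forces a fresh lookup in hbase:meta instead of the cached location.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println(location.getRegion().getEncodedName()
          + " is open on " + location.getServerName());
    }
  }
}
```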
2024-11-23T03:22:24,622 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:22:24,622 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,622 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:22:24,622 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,622 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,624 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,625 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:24,625 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName A 2024-11-23T03:22:24,626 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:24,626 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:24,626 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,627 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:24,627 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName B 2024-11-23T03:22:24,628 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:24,628 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:24,628 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,633 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:24,633 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName C 2024-11-23T03:22:24,633 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:24,633 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:24,634 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,634 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,635 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,636 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:22:24,638 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:24,639 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:22:24,640 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened 73db48321d40b906ec9d02fc18076043; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67702728, jitterRate=0.008849263191223145}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:22:24,641 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:24,642 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., pid=96, masterSystemTime=1732332144618 2024-11-23T03:22:24,643 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:24,643 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:24,643 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:24,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-23T03:22:24,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 in 177 msec 2024-11-23T03:22:24,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-23T03:22:24,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, ASSIGN in 334 msec 2024-11-23T03:22:24,648 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:22:24,648 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332144648"}]},"ts":"1732332144648"} 2024-11-23T03:22:24,649 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:22:24,652 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:22:24,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 404 msec 2024-11-23T03:22:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T03:22:24,856 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-11-23T03:22:24,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f7f772a to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b976e1a 2024-11-23T03:22:24,873 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df61dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:24,879 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:24,883 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60536, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:24,884 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:22:24,885 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36658, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:22:24,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T03:22:24,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:22:24,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:24,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742194_1370 (size=996) 2024-11-23T03:22:24,900 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T03:22:24,900 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T03:22:24,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:22:24,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, REOPEN/MOVE}] 2024-11-23T03:22:24,905 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, REOPEN/MOVE 2024-11-23T03:22:24,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:24,907 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:22:24,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:25,059 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,059 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,059 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:22:25,059 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing 73db48321d40b906ec9d02fc18076043, disabling compactions & flushes 2024-11-23T03:22:25,059 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,059 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,059 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. after waiting 0 ms 2024-11-23T03:22:25,059 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:25,068 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T03:22:25,069 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,069 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:25,070 WARN [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: 73db48321d40b906ec9d02fc18076043 to self. 2024-11-23T03:22:25,071 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,072 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=CLOSED 2024-11-23T03:22:25,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-23T03:22:25,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 in 166 msec 2024-11-23T03:22:25,076 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, REOPEN/MOVE; state=CLOSED, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=true 2024-11-23T03:22:25,226 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:25,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,384 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:25,384 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:22:25,385 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,385 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:22:25,385 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,385 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,389 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,391 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:25,391 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName A 2024-11-23T03:22:25,392 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:25,393 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:25,393 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,394 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:25,394 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName B 2024-11-23T03:22:25,394 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:25,395 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:25,395 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,395 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:25,396 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73db48321d40b906ec9d02fc18076043 columnFamilyName C 2024-11-23T03:22:25,396 DEBUG [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:25,404 INFO [StoreOpener-73db48321d40b906ec9d02fc18076043-1 {}] regionserver.HStore(327): Store=73db48321d40b906ec9d02fc18076043/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:25,404 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,405 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,406 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,408 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:22:25,410 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,412 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened 73db48321d40b906ec9d02fc18076043; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66804217, jitterRate=-0.004539594054222107}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:22:25,413 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:25,414 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., pid=101, masterSystemTime=1732332145379 2024-11-23T03:22:25,416 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,416 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:25,417 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=OPEN, openSeqNum=5, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-11-23T03:22:25,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 in 191 msec 2024-11-23T03:22:25,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-23T03:22:25,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, REOPEN/MOVE in 515 msec 2024-11-23T03:22:25,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-23T03:22:25,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 519 msec 2024-11-23T03:22:25,424 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 535 msec 2024-11-23T03:22:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-23T03:22:25,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bf5e2f0 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b82ba2a 2024-11-23T03:22:25,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3637e4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-11-23T03:22:25,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,439 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-11-23T03:22:25,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 
127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-11-23T03:22:25,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,451 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-11-23T03:22:25,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,455 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-11-23T03:22:25,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,462 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-11-23T03:22:25,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,471 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-11-23T03:22:25,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,474 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-11-23T03:22:25,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,479 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-11-23T03:22:25,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:25,484 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:25,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-23T03:22:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:25,487 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:25,487 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:25,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:25,488 DEBUG [hconnection-0x79f77565-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,488 DEBUG [hconnection-0x3625c1c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,489 DEBUG [hconnection-0x11171b58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,489 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,489 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,490 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,490 DEBUG [hconnection-0x522168ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,491 DEBUG [hconnection-0x7f8fd376-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,492 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60580, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,492 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,500 DEBUG [hconnection-0xfeeffdc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,501 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,506 DEBUG [hconnection-0x56ffb399-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,507 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,508 DEBUG [hconnection-0xc900263-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,512 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:25,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:25,523 DEBUG [hconnection-0x6b2237d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,525 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,534 DEBUG [hconnection-0x4e45fa7c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:25,536 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:25,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332205538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332205539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332205541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238cc0977aad7844749630c1da303f514b_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332145508/Put/seqid=0 2024-11-23T03:22:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742195_1371 (size=12154) 2024-11-23T03:22:25,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:25,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:25,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332205641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332205642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332205642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332205644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332205646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:25,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:25,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:25,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332205844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332205844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332205844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332205848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332205848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:25,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:25,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:25,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,010 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:26,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238cc0977aad7844749630c1da303f514b_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238cc0977aad7844749630c1da303f514b_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:26,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/3fe34eb55ef145729d16839426d5a3d5, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:26,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/3fe34eb55ef145729d16839426d5a3d5 is 175, key is test_row_0/A:col10/1732332145508/Put/seqid=0 2024-11-23T03:22:26,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742196_1372 (size=30955) 2024-11-23T03:22:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:26,103 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332206146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332206148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332206148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332206151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332206155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,481 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/3fe34eb55ef145729d16839426d5a3d5 2024-11-23T03:22:26,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/78f5c04b717843a4b523cfe3707968f2 is 50, key is test_row_0/B:col10/1732332145508/Put/seqid=0 2024-11-23T03:22:26,564 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:26,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742197_1373 (size=12001) 2024-11-23T03:22:26,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:26,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332206652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332206653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332206655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332206658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:26,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332206665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,717 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,870 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:26,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:26,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:26,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:26,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:26,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/78f5c04b717843a4b523cfe3707968f2 2024-11-23T03:22:27,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:27,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:27,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:27,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:27,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:27,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:27,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:27,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/f559fed2a9d94ad4a129a0960bc10841 is 50, key is test_row_0/C:col10/1732332145508/Put/seqid=0 2024-11-23T03:22:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742198_1374 (size=12001) 2024-11-23T03:22:27,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/f559fed2a9d94ad4a129a0960bc10841 2024-11-23T03:22:27,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/3fe34eb55ef145729d16839426d5a3d5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5 2024-11-23T03:22:27,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5, entries=150, sequenceid=18, filesize=30.2 K 2024-11-23T03:22:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-23T03:22:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-23T03:22:27,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/78f5c04b717843a4b523cfe3707968f2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2 2024-11-23T03:22:27,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2, entries=150, sequenceid=18, filesize=11.7 K 2024-11-23T03:22:27,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/f559fed2a9d94ad4a129a0960bc10841 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841 2024-11-23T03:22:27,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841, entries=150, sequenceid=18, filesize=11.7 K 2024-11-23T03:22:27,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 73db48321d40b906ec9d02fc18076043 in 1605ms, sequenceid=18, compaction requested=false 2024-11-23T03:22:27,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:27,176 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T03:22:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:27,177 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:27,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:27,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235a2ddca3791f443aa9f2bd838d3045da_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332145538/Put/seqid=0 2024-11-23T03:22:27,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742199_1375 (size=12154) 2024-11-23T03:22:27,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:27,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:27,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
as already flushing 2024-11-23T03:22:27,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:27,674 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235a2ddca3791f443aa9f2bd838d3045da_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235a2ddca3791f443aa9f2bd838d3045da_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:27,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/b02356e46285424eabd438aab7120f29, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:27,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/b02356e46285424eabd438aab7120f29 is 175, key is test_row_0/A:col10/1732332145538/Put/seqid=0 2024-11-23T03:22:27,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332207684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332207687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332207688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332207689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332207690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742200_1376 (size=30955) 2024-11-23T03:22:27,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332207793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332207798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332207804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332207804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:27,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:27,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332207812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332208003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332208006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332208019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332208021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332208022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,116 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/b02356e46285424eabd438aab7120f29 2024-11-23T03:22:28,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/0c2e0031cc56497294774c94ff56fb23 is 50, key is test_row_0/B:col10/1732332145538/Put/seqid=0 2024-11-23T03:22:28,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742201_1377 (size=12001) 2024-11-23T03:22:28,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332208316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332208319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332208324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332208327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332208349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,579 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/0c2e0031cc56497294774c94ff56fb23 2024-11-23T03:22:28,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/8d257459708d4c358aa5ac4ed580370f is 50, key is test_row_0/C:col10/1732332145538/Put/seqid=0 2024-11-23T03:22:28,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742202_1378 (size=12001) 2024-11-23T03:22:28,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332208828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332208844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332208848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332208852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:28,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:28,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332208857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,044 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/8d257459708d4c358aa5ac4ed580370f 2024-11-23T03:22:29,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/b02356e46285424eabd438aab7120f29 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29 2024-11-23T03:22:29,058 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T03:22:29,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/0c2e0031cc56497294774c94ff56fb23 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23 2024-11-23T03:22:29,062 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T03:22:29,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/8d257459708d4c358aa5ac4ed580370f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f 2024-11-23T03:22:29,071 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T03:22:29,072 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 73db48321d40b906ec9d02fc18076043 in 1895ms, sequenceid=41, compaction requested=false 2024-11-23T03:22:29,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:29,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
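Note on the entries above: every WARN/DEBUG pair traces back to HRegion.checkResources(HRegion.java:5067), which rejects incoming puts with RegionTooBusyException while the region's memstore is over its blocking limit (512.0 K in this run, which suggests the test deliberately configures a very small flush size so this path is exercised); the flush that finishes at 03:22:29,072 drains the memstore and relieves the pressure until the next burst of puts trips the limit again from 03:22:29,908 onward. The following is a minimal, hedged sketch of how a client could back off on that exception; it is not part of the test code, the HBase client already retries this exception internally, and MAX_ATTEMPTS, BASE_BACKOFF_MS and the cell value are illustrative assumptions, while the table, row, family and qualifier names are the ones visible in the log.

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  // Illustrative retry budget; not taken from the test above.
  private static final int MAX_ATTEMPTS = 5;
  private static final long BASE_BACKOFF_MS = 100L;

  // Retry a single put with linear backoff while the region reports itself too busy.
  public static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);   // throws RegionTooBusyException while the memstore is over its blocking limit
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= MAX_ATTEMPTS) {
          throw e;        // give up once the retry budget is spent
        }
        Thread.sleep(BASE_BACKOFF_MS * attempt);  // linear backoff; a production client would cap and jitter this
      }
    }
  }

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }
}

Backing off rather than hammering the region is the point of the sketch: while checkResources keeps throwing, the region server is busy flushing (as the pid=103/pid=105 flush procedures in this log show), and immediate resubmission only reproduces the same exception.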
2024-11-23T03:22:29,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-23T03:22:29,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-23T03:22:29,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-23T03:22:29,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5860 sec 2024-11-23T03:22:29,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 3.5910 sec 2024-11-23T03:22:29,269 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:22:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T03:22:29,592 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-23T03:22:29,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:29,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-23T03:22:29,596 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:29,598 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:29,598 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:29,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:29,754 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T03:22:29,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:29,755 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T03:22:29,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:29,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:29,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:29,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:29,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:29,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:29,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112372ebce64d327453bb368176c8aa98ca7_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332147687/Put/seqid=0 2024-11-23T03:22:29,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742203_1379 (size=12154) 2024-11-23T03:22:29,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:29,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:29,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:29,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332209898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:29,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332209899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332209901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332209904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:29,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332209912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332210010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332210010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332210016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332210016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332210018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:30,197 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112372ebce64d327453bb368176c8aa98ca7_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112372ebce64d327453bb368176c8aa98ca7_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:30,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/29fb037c620e4383b99ffbdc703e8d52, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:30,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/29fb037c620e4383b99ffbdc703e8d52 is 175, key is test_row_0/A:col10/1732332147687/Put/seqid=0 2024-11-23T03:22:30,208 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:30,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332210218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332210218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332210224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332210224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332210225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742204_1380 (size=30955) 2024-11-23T03:22:30,237 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/29fb037c620e4383b99ffbdc703e8d52 2024-11-23T03:22:30,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/24c6ca012e1a41fea8a58271ba4805d7 is 50, key is test_row_0/B:col10/1732332147687/Put/seqid=0 2024-11-23T03:22:30,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742205_1381 (size=12001) 2024-11-23T03:22:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332210524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332210524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332210532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332210532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:30,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332210534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:30,695 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/24c6ca012e1a41fea8a58271ba4805d7 2024-11-23T03:22:30,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:30,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/96a7eb2826374e51aa2d5650145e532f is 50, key is test_row_0/C:col10/1732332147687/Put/seqid=0 2024-11-23T03:22:30,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742206_1382 (size=12001) 2024-11-23T03:22:30,766 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/96a7eb2826374e51aa2d5650145e532f 2024-11-23T03:22:30,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/29fb037c620e4383b99ffbdc703e8d52 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52 2024-11-23T03:22:30,778 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52, entries=150, sequenceid=54, filesize=30.2 K 2024-11-23T03:22:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/24c6ca012e1a41fea8a58271ba4805d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7 2024-11-23T03:22:30,784 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T03:22:30,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/96a7eb2826374e51aa2d5650145e532f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f 2024-11-23T03:22:30,790 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T03:22:30,791 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 73db48321d40b906ec9d02fc18076043 in 1036ms, sequenceid=54, compaction requested=true 2024-11-23T03:22:30,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:30,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:30,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-23T03:22:30,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-23T03:22:30,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-23T03:22:30,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1950 sec 2024-11-23T03:22:30,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.2000 sec 2024-11-23T03:22:31,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:31,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:31,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:31,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237d96e9e68b85499f9a7df306610a2ab4_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:31,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332211065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332211066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332211079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332211078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332211081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742207_1383 (size=14594) 2024-11-23T03:22:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332211183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332211184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332211192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332211193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332211194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332211390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332211397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332211398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332211400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332211401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,502 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:31,510 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237d96e9e68b85499f9a7df306610a2ab4_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d96e9e68b85499f9a7df306610a2ab4_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:31,511 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/69925669b1b44a51b85b13916c45d056, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:31,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/69925669b1b44a51b85b13916c45d056 is 175, key is test_row_0/A:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:31,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742208_1384 (size=39549) 2024-11-23T03:22:31,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332211698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T03:22:31,710 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-23T03:22:31,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332211703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332211704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332211707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:31,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332211707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-23T03:22:31,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:31,714 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:31,715 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:31,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:31,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:31,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:31,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
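[Editorial note on the entries above] Every RegionTooBusyException in this stretch comes from HRegion.checkResources, which rejects incoming Mutate calls once the region's memstore passes its blocking threshold (512.0 K here) until the in-flight flush frees space. The HBase client treats this as a retriable error and normally retries internally; the sketch below is only a hand-rolled illustration of that failure mode against the TestAcidGuarantees table, assuming a reachable cluster, column family A, and row/qualifier names taken from the log. It is not code from the test itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          // The server may reject this while the region's memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (IOException ioe) {
          // A RegionTooBusyException (possibly wrapped by the client's own retry layer) is
          // worth waiting out; anything else is rethrown.
          boolean busy = ioe instanceof RegionTooBusyException
              || ioe.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 10) {
            throw ioe;
          }
          Thread.sleep(backoffMs);                  // give MemStoreFlusher time to make progress
          backoffMs = Math.min(backoffMs * 2, 5000); // simple capped exponential backoff
        }
      }
    }
  }
}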
2024-11-23T03:22:31,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:31,964 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/69925669b1b44a51b85b13916c45d056 2024-11-23T03:22:31,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/350f6d99fe9d4f66a32195ce9110fde1 is 50, key is test_row_0/B:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:32,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
as already flushing 2024-11-23T03:22:32,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742209_1385 (size=12001) 2024-11-23T03:22:32,173 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332212202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332212212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332212213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:32,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332212216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:32,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332212217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:32,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:32,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/350f6d99fe9d4f66a32195ce9110fde1 2024-11-23T03:22:32,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ebb6584c514745a49e1c12861b87404c is 50, key is test_row_0/C:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:32,480 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:32,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
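[Editorial note on the pid=106/107 entries] The repeating pattern here is the master's FlushTableProcedure (pid=106) dispatching a FlushRegionCallable (pid=107) to the region server, the server declining because MemStoreFlusher.0 is already flushing that region ("NOT flushing ... as already flushing"), the callable failing with "Unable to complete flush", and the master re-dispatching until the in-progress flush finishes. The client side is simply an admin-initiated flush, and the 512 K blocking limit corresponds to hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact values TestAcidGuarantees uses are not visible in this excerpt, so the 128 K x 4 figures below are an assumption chosen only to reproduce a 512 K limit; the sketch is illustrative, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWhileBusyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side settings (effective when this Configuration starts the cluster, e.g. a
    // mini-cluster in a test; setting them on a plain client has no effect). Shown only to
    // document the arithmetic: 128 K flush size x multiplier 4 = 512 K blocking limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Behind this call the master runs a FlushTableProcedure and fans out
      // FlushRegionProcedure/FlushRegionCallable work to the region server; if a flush is
      // already running there, the remote call fails and is retried, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}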
2024-11-23T03:22:32,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742210_1386 (size=12001) 2024-11-23T03:22:32,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:32,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,791 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:32,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:32,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:32,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ebb6584c514745a49e1c12861b87404c 2024-11-23T03:22:32,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/69925669b1b44a51b85b13916c45d056 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056 2024-11-23T03:22:32,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056, entries=200, sequenceid=78, filesize=38.6 K 2024-11-23T03:22:32,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/350f6d99fe9d4f66a32195ce9110fde1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1 2024-11-23T03:22:32,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T03:22:32,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ebb6584c514745a49e1c12861b87404c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c 2024-11-23T03:22:32,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T03:22:32,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 73db48321d40b906ec9d02fc18076043 in 1895ms, sequenceid=78, compaction requested=true 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:32,929 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:22:32,929 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:32,931 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:32,931 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:32,931 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:32,931 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:32,931 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,931 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,931 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=46.9 K 2024-11-23T03:22:32,931 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=129.3 K 2024-11-23T03:22:32,931 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:32,931 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056] 2024-11-23T03:22:32,932 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78f5c04b717843a4b523cfe3707968f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732332145508 2024-11-23T03:22:32,933 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c2e0031cc56497294774c94ff56fb23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332145538 2024-11-23T03:22:32,933 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fe34eb55ef145729d16839426d5a3d5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732332145508 2024-11-23T03:22:32,933 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24c6ca012e1a41fea8a58271ba4805d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332147685 2024-11-23T03:22:32,933 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 350f6d99fe9d4f66a32195ce9110fde1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732332149901 2024-11-23T03:22:32,933 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b02356e46285424eabd438aab7120f29, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332145538 2024-11-23T03:22:32,934 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 29fb037c620e4383b99ffbdc703e8d52, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332147685 2024-11-23T03:22:32,935 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 69925669b1b44a51b85b13916c45d056, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732332149898 2024-11-23T03:22:32,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:32,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 
{event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:32,945 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:32,951 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#332 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:32,952 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/02df864f4fb24e3c92e299c8fb128c7b is 50, key is test_row_0/B:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:32,989 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123009f1ee65e90402d89bfbd6ac2884b2a_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332151053/Put/seqid=0 2024-11-23T03:22:33,005 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123f6aa2fbece6a4c609f72b74ddd04ce3b_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,008 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123f6aa2fbece6a4c609f72b74ddd04ce3b_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,008 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f6aa2fbece6a4c609f72b74ddd04ce3b_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742211_1387 (size=12139) 2024-11-23T03:22:33,044 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/02df864f4fb24e3c92e299c8fb128c7b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/02df864f4fb24e3c92e299c8fb128c7b 2024-11-23T03:22:33,050 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into 02df864f4fb24e3c92e299c8fb128c7b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:33,050 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:33,050 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=12, startTime=1732332152929; duration=0sec 2024-11-23T03:22:33,050 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:33,050 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:33,050 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:33,054 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:33,054 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:33,054 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:33,054 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=46.9 K 2024-11-23T03:22:33,055 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f559fed2a9d94ad4a129a0960bc10841, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732332145508 2024-11-23T03:22:33,055 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d257459708d4c358aa5ac4ed580370f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732332145538 2024-11-23T03:22:33,057 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96a7eb2826374e51aa2d5650145e532f, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332147685 2024-11-23T03:22:33,057 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebb6584c514745a49e1c12861b87404c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732332149901 2024-11-23T03:22:33,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742213_1389 (size=4469) 2024-11-23T03:22:33,075 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#333 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:33,075 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f3e879c264c24102b54c4b742c67f90f is 175, key is test_row_0/A:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:33,081 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#335 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:33,082 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/636e58263d8c4d80b9303aefaf8f768f is 50, key is test_row_0/C:col10/1732332149907/Put/seqid=0 2024-11-23T03:22:33,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742212_1388 (size=12154) 2024-11-23T03:22:33,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:33,100 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123009f1ee65e90402d89bfbd6ac2884b2a_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123009f1ee65e90402d89bfbd6ac2884b2a_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:33,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fc753a5ae6c442bc9a23cc953cca0e41, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 
{event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fc753a5ae6c442bc9a23cc953cca0e41 is 175, key is test_row_0/A:col10/1732332151053/Put/seqid=0 2024-11-23T03:22:33,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742214_1390 (size=31093) 2024-11-23T03:22:33,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742215_1391 (size=12139) 2024-11-23T03:22:33,165 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f3e879c264c24102b54c4b742c67f90f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f 2024-11-23T03:22:33,181 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into f3e879c264c24102b54c4b742c67f90f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:33,182 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:33,182 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=12, startTime=1732332152929; duration=0sec 2024-11-23T03:22:33,182 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:33,182 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:33,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742216_1392 (size=30955) 2024-11-23T03:22:33,194 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=90, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fc753a5ae6c442bc9a23cc953cca0e41 2024-11-23T03:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
as already flushing 2024-11-23T03:22:33,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:33,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/745e395aeae04d78b8742ffca77ceee2 is 50, key is test_row_0/B:col10/1732332151053/Put/seqid=0 2024-11-23T03:22:33,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742217_1393 (size=12001) 2024-11-23T03:22:33,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332213252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,258 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/745e395aeae04d78b8742ffca77ceee2 2024-11-23T03:22:33,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332213254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332213253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332213256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332213257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/7189decdf8db4468ad8aa941e256f9ef is 50, key is test_row_0/C:col10/1732332151053/Put/seqid=0 2024-11-23T03:22:33,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742218_1394 (size=12001) 2024-11-23T03:22:33,315 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/7189decdf8db4468ad8aa941e256f9ef 2024-11-23T03:22:33,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fc753a5ae6c442bc9a23cc953cca0e41 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41 2024-11-23T03:22:33,325 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41, entries=150, sequenceid=90, filesize=30.2 K 2024-11-23T03:22:33,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/745e395aeae04d78b8742ffca77ceee2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2 2024-11-23T03:22:33,331 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 
{event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:22:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/7189decdf8db4468ad8aa941e256f9ef as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef 2024-11-23T03:22:33,336 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:22:33,337 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 73db48321d40b906ec9d02fc18076043 in 392ms, sequenceid=90, compaction requested=false 2024-11-23T03:22:33,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:33,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:33,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-23T03:22:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-23T03:22:33,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-23T03:22:33,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-11-23T03:22:33,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.6280 sec 2024-11-23T03:22:33,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:33,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:33,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:33,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239174221734604586a970243aab3e0509_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:33,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332213381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332213399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332213399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332213400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332213401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742219_1395 (size=12154) 2024-11-23T03:22:33,439 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:33,444 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239174221734604586a970243aab3e0509_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239174221734604586a970243aab3e0509_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:33,445 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ad34f09425ef479ea982036733445770, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:33,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ad34f09425ef479ea982036733445770 is 175, key is test_row_0/A:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:33,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is 
added to blk_1073742220_1396 (size=30955) 2024-11-23T03:22:33,494 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ad34f09425ef479ea982036733445770 2024-11-23T03:22:33,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332213503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5d7885a27a4e70847b1fd3845447c2 is 50, key is test_row_0/B:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:33,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332213512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332213512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332213513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332213513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742221_1397 (size=12001) 2024-11-23T03:22:33,576 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/636e58263d8c4d80b9303aefaf8f768f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/636e58263d8c4d80b9303aefaf8f768f 2024-11-23T03:22:33,581 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into 636e58263d8c4d80b9303aefaf8f768f(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:33,581 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:33,581 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=12, startTime=1732332152929; duration=0sec 2024-11-23T03:22:33,581 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:33,581 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:33,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332213715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332213722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332213723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332213723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:33,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332213724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T03:22:33,819 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-23T03:22:33,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-23T03:22:33,823 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:33,824 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:33,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:33,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:33,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5d7885a27a4e70847b1fd3845447c2 2024-11-23T03:22:33,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/b1d600c5bb9f4dcfa6a02f13bdcd554b is 50, key is test_row_0/C:col10/1732332153363/Put/seqid=0 
2024-11-23T03:22:33,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:33,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:33,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:33,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:33,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:33,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:33,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:34,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742222_1398 (size=12001) 2024-11-23T03:22:34,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/b1d600c5bb9f4dcfa6a02f13bdcd554b 2024-11-23T03:22:34,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ad34f09425ef479ea982036733445770 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770 2024-11-23T03:22:34,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770, entries=150, sequenceid=117, filesize=30.2 K 2024-11-23T03:22:34,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5d7885a27a4e70847b1fd3845447c2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2 2024-11-23T03:22:34,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332214021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2, entries=150, sequenceid=117, filesize=11.7 K 2024-11-23T03:22:34,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/b1d600c5bb9f4dcfa6a02f13bdcd554b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b 2024-11-23T03:22:34,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b, entries=150, sequenceid=117, filesize=11.7 K 2024-11-23T03:22:34,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 73db48321d40b906ec9d02fc18076043 in 672ms, sequenceid=117, compaction requested=true 2024-11-23T03:22:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:34,037 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:34,037 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 
2024-11-23T03:22:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:34,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:34,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:34,039 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:34,039 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:34,039 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,039 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=90.8 K 2024-11-23T03:22:34,039 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,039 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770] 2024-11-23T03:22:34,039 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:34,039 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:34,039 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,039 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/02df864f4fb24e3c92e299c8fb128c7b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=35.3 K 2024-11-23T03:22:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:34,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:34,041 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 02df864f4fb24e3c92e299c8fb128c7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=78, earliestPutTs=1732332149901 2024-11-23T03:22:34,042 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3e879c264c24102b54c4b742c67f90f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732332149901 2024-11-23T03:22:34,042 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 745e395aeae04d78b8742ffca77ceee2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332151053 2024-11-23T03:22:34,042 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc753a5ae6c442bc9a23cc953cca0e41, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332151053 2024-11-23T03:22:34,042 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ef5d7885a27a4e70847b1fd3845447c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:34,042 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad34f09425ef479ea982036733445770, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:34,058 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#341 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:34,059 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/db80d8ef6b25491dad13fdf79bd3a85b is 50, key is test_row_0/B:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:34,064 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:34,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d9da8c67f7944cc6a16ab06b52c0ffa2_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332153398/Put/seqid=0 2024-11-23T03:22:34,082 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411231b1387c761f541f896c4fdbad87cedd8_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:34,084 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411231b1387c761f541f896c4fdbad87cedd8_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:34,084 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting 
writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231b1387c761f541f896c4fdbad87cedd8_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:34,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742223_1399 (size=12241) 2024-11-23T03:22:34,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332214102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332214102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332214104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332214105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,117 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/db80d8ef6b25491dad13fdf79bd3a85b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/db80d8ef6b25491dad13fdf79bd3a85b 2024-11-23T03:22:34,125 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into db80d8ef6b25491dad13fdf79bd3a85b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:34,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:34,125 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=13, startTime=1732332154037; duration=0sec 2024-11-23T03:22:34,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:34,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:34,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:34,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:34,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:34,126 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:34,129 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/636e58263d8c4d80b9303aefaf8f768f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=35.3 K 2024-11-23T03:22:34,129 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 636e58263d8c4d80b9303aefaf8f768f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732332149901 2024-11-23T03:22:34,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,130 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7189decdf8db4468ad8aa941e256f9ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332151053 2024-11-23T03:22:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,131 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b1d600c5bb9f4dcfa6a02f13bdcd554b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:34,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742224_1400 (size=14644) 2024-11-23T03:22:34,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742225_1401 (size=4469) 2024-11-23T03:22:34,169 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#342 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:34,170 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cafaa984195644ff806c487ed9fbb805 is 175, key is test_row_0/A:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:34,176 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#344 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:34,176 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/b97e2ad3e6a048d89f8657c2227f0050 is 50, key is test_row_0/C:col10/1732332153363/Put/seqid=0 2024-11-23T03:22:34,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742226_1402 (size=31195) 2024-11-23T03:22:34,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742227_1403 (size=12241) 2024-11-23T03:22:34,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332214217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332214220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,230 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/b97e2ad3e6a048d89f8657c2227f0050 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b97e2ad3e6a048d89f8657c2227f0050 2024-11-23T03:22:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332214224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332214224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,235 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into b97e2ad3e6a048d89f8657c2227f0050(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:34,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:34,235 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=13, startTime=1732332154038; duration=0sec 2024-11-23T03:22:34,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:34,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:34,283 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:34,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332214423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:34,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332214423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332214433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332214433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,437 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332214532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,542 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:34,548 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d9da8c67f7944cc6a16ab06b52c0ffa2_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d9da8c67f7944cc6a16ab06b52c0ffa2_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:34,549 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f30fb67a45144e5d856747268eccb026, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:34,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f30fb67a45144e5d856747268eccb026 is 175, key is test_row_0/A:col10/1732332153398/Put/seqid=0 2024-11-23T03:22:34,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742228_1404 (size=39599) 2024-11-23T03:22:34,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:34,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,604 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cafaa984195644ff806c487ed9fbb805 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805 2024-11-23T03:22:34,611 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into cafaa984195644ff806c487ed9fbb805(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:34,611 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:34,611 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=13, startTime=1732332154037; duration=0sec 2024-11-23T03:22:34,612 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:34,612 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:34,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332214731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332214731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:34,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:34,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:34,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332214739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332214738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:34,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:34,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
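(Annotation) The RegionTooBusyException warnings indicate the region's memstore is over its blocking limit (512.0 K here), so Mutate calls are rejected until the flush drains it. The HBase client normally retries such failures internally; the sketch below shows how a caller issuing its own puts could also back off explicitly. Table name, family A, and qualifier col10 are taken from this log; the retry policy and value are illustrative, and depending on client retry settings the exception may arrive wrapped rather than as shown.

    // Hedged sketch: back off and retry a put when the region reports it is too busy.
    // Connection/table setup uses the standard HBase client API; the retry policy is illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        return;                              // write accepted
                    } catch (RegionTooBusyException e) {     // memstore over its blocking limit
                        Thread.sleep(backoffMs);             // wait for the flush to drain it
                        backoffMs *= 2;
                    }
                }
            }
        }
    }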
2024-11-23T03:22:34,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:34,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:34,979 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f30fb67a45144e5d856747268eccb026 2024-11-23T03:22:34,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/bb78aed3aedf4ba9b621629d05e4f7a8 is 50, key is test_row_0/B:col10/1732332153398/Put/seqid=0 2024-11-23T03:22:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742229_1405 (size=12051) 2024-11-23T03:22:35,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
as already flushing 2024-11-23T03:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332215240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332215240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:35,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332215250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:35,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332215253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
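(Annotation) For reference, the 512.0 K figure in these warnings is the per-region blocking memstore size, i.e. the configured flush size multiplied by the block multiplier. This test run evidently uses a very small flush size; the section does not show the actual configuration, so the 128 K value below is an assumption chosen only because it yields 512 K with the default multiplier of 4. The two property names are standard HBase configuration keys.

    // Hedged sketch: how a 512 K blocking limit could arise from test-sized settings.
    // The 128 K flush size is an assumption; only the property names are standard HBase keys.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed test value
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // HBase default
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288 = 512 K
        }
    }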
2024-11-23T03:22:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/bb78aed3aedf4ba9b621629d05e4f7a8 2024-11-23T03:22:35,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/1fa380397b0a41958385f27d2e5aace0 is 50, key is test_row_0/C:col10/1732332153398/Put/seqid=0 2024-11-23T03:22:35,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742230_1406 (size=12051) 2024-11-23T03:22:35,509 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332215545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:35,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:35,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,819 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:35,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:35,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:35,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/1fa380397b0a41958385f27d2e5aace0 2024-11-23T03:22:35,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/f30fb67a45144e5d856747268eccb026 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026 2024-11-23T03:22:35,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026, entries=200, sequenceid=131, filesize=38.7 K 2024-11-23T03:22:35,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/bb78aed3aedf4ba9b621629d05e4f7a8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8 2024-11-23T03:22:35,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8, entries=150, 
sequenceid=131, filesize=11.8 K 2024-11-23T03:22:35,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/1fa380397b0a41958385f27d2e5aace0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0 2024-11-23T03:22:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:35,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0, entries=150, sequenceid=131, filesize=11.8 K 2024-11-23T03:22:35,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 73db48321d40b906ec9d02fc18076043 in 1891ms, sequenceid=131, compaction requested=false 2024-11-23T03:22:35,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:35,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:35,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:35,975 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:35,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236114a548caf64e11bfb53d3c8fbb9d2d_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332154103/Put/seqid=0 2024-11-23T03:22:36,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742231_1407 (size=12304) 2024-11-23T03:22:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:36,064 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236114a548caf64e11bfb53d3c8fbb9d2d_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236114a548caf64e11bfb53d3c8fbb9d2d_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:36,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/59d052af0ad445eca970d5b4e76c7716, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:36,066 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/59d052af0ad445eca970d5b4e76c7716 is 175, key is test_row_0/A:col10/1732332154103/Put/seqid=0 2024-11-23T03:22:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742232_1408 (size=31105) 2024-11-23T03:22:36,100 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/59d052af0ad445eca970d5b4e76c7716 2024-11-23T03:22:36,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/10dd465209e34ea59fc84dab259728fb is 50, key is test_row_0/B:col10/1732332154103/Put/seqid=0 2024-11-23T03:22:36,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742233_1409 (size=12151) 2024-11-23T03:22:36,162 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/10dd465209e34ea59fc84dab259728fb 2024-11-23T03:22:36,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4b5feae61fa648028d78cec561c47427 is 50, key is test_row_0/C:col10/1732332154103/Put/seqid=0 2024-11-23T03:22:36,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742234_1410 (size=12151) 2024-11-23T03:22:36,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:36,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:36,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332216280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332216282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332216284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332216286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332216390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332216391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332216392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332216395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332216597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332216597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332216599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,614 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4b5feae61fa648028d78cec561c47427 2024-11-23T03:22:36,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332216605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:36,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/59d052af0ad445eca970d5b4e76c7716 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716 2024-11-23T03:22:36,622 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716, entries=150, sequenceid=157, filesize=30.4 K 2024-11-23T03:22:36,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/10dd465209e34ea59fc84dab259728fb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb 2024-11-23T03:22:36,628 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T03:22:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4b5feae61fa648028d78cec561c47427 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427 2024-11-23T03:22:36,632 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T03:22:36,633 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 73db48321d40b906ec9d02fc18076043 in 658ms, sequenceid=157, compaction requested=true 2024-11-23T03:22:36,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:36,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:36,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-23T03:22:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-23T03:22:36,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-23T03:22:36,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8120 sec 2024-11-23T03:22:36,638 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.8160 sec 2024-11-23T03:22:36,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:36,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:22:36,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:36,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:36,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234177fc29ac2a458996ea98dbd056ec35_73db48321d40b906ec9d02fc18076043 is 50, key is 
test_row_0/A:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:36,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742235_1411 (size=12304) 2024-11-23T03:22:37,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332217000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332217000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332217002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332217002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332217111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332217111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332217111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332217112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332217318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332217318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332217321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332217323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,364 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:37,370 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234177fc29ac2a458996ea98dbd056ec35_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234177fc29ac2a458996ea98dbd056ec35_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:37,371 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fb563f6576dc4e55bfb43c65e7fcde4b, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:37,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fb563f6576dc4e55bfb43c65e7fcde4b is 175, key is test_row_0/A:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:37,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742236_1412 (size=31105) 2024-11-23T03:22:37,382 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fb563f6576dc4e55bfb43c65e7fcde4b 2024-11-23T03:22:37,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/721bff5de03f42b5b553f6c6a1cd1355 is 50, key is test_row_0/B:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742237_1413 
(size=12151) 2024-11-23T03:22:37,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332217566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,573 DEBUG [Thread-1664 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:37,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332217626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332217626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332217628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332217633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:37,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/721bff5de03f42b5b553f6c6a1cd1355 2024-11-23T03:22:37,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/e83c977efb8045db95f5ba58c0fbfc28 is 50, key is test_row_0/C:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:37,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742238_1414 (size=12151) 2024-11-23T03:22:37,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/e83c977efb8045db95f5ba58c0fbfc28 2024-11-23T03:22:37,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/fb563f6576dc4e55bfb43c65e7fcde4b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b 2024-11-23T03:22:37,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b, entries=150, sequenceid=172, filesize=30.4 K 2024-11-23T03:22:37,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/721bff5de03f42b5b553f6c6a1cd1355 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355 2024-11-23T03:22:37,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355, entries=150, sequenceid=172, filesize=11.9 K 2024-11-23T03:22:37,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/e83c977efb8045db95f5ba58c0fbfc28 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28 2024-11-23T03:22:37,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28, entries=150, sequenceid=172, filesize=11.9 K 2024-11-23T03:22:37,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 73db48321d40b906ec9d02fc18076043 in 977ms, sequenceid=172, compaction requested=true 2024-11-23T03:22:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:37,888 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:37,889 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:37,889 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:37,889 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
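The flush recorded above wrote ~80.51 KB in 977 ms at sequenceid=172 and was immediately followed by compaction selection over 4 store files per column family, while the earlier RegionTooBusyException entries show writes being rejected at the 512.0 K memstore blocking limit. The Java sketch below is illustrative only: the property keys are the standard HBase settings that govern this behavior, but the values are assumptions, not the ones used by this test run (the test evidently uses a much smaller flush size to force frequent flushes).

    // Illustrative only: standard HBase configuration keys that control when a
    // region blocks writes and when compaction is requested; the values below
    // are assumptions, not the settings used by this test.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuning {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Memstore size at which a flush of the region is requested.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Updates are rejected with RegionTooBusyException once the memstore
            // reaches flush.size * block.multiplier (the "Over memstore limit"
            // seen in the WARN entries above).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            // Number of store files in a store that makes it eligible for minor
            // compaction, which is why a compaction is selected right after this flush.
            conf.setInt("hbase.hstore.compactionThreshold", 3);
            return conf;
        }
    }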
2024-11-23T03:22:37,889 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=129.9 K 2024-11-23T03:22:37,889 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:37,889 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b] 2024-11-23T03:22:37,890 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting cafaa984195644ff806c487ed9fbb805, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:37,890 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f30fb67a45144e5d856747268eccb026, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732332153398 2024-11-23T03:22:37,891 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59d052af0ad445eca970d5b4e76c7716, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732332154089 2024-11-23T03:22:37,893 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb563f6576dc4e55bfb43c65e7fcde4b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): 
Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:37,899 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:37,901 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:37,901 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:37,901 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:37,901 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/db80d8ef6b25491dad13fdf79bd3a85b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=47.5 K 2024-11-23T03:22:37,901 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting db80d8ef6b25491dad13fdf79bd3a85b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:37,902 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting bb78aed3aedf4ba9b621629d05e4f7a8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732332153398 2024-11-23T03:22:37,902 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 10dd465209e34ea59fc84dab259728fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732332154089 2024-11-23T03:22:37,902 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 721bff5de03f42b5b553f6c6a1cd1355, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:37,910 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:37,925 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#354 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:37,926 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411238377ef465d6441928d3174f3659ff989_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:37,928 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/385509cb6ff74740934d43cd1c190fd4 is 50, key is test_row_0/B:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:37,929 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411238377ef465d6441928d3174f3659ff989_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:37,929 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238377ef465d6441928d3174f3659ff989_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T03:22:37,930 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-23T03:22:37,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 
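The entries just above show a client-initiated flush: procId 108 completed and a new FlushTableProcedure (pid=110) was stored for TestAcidGuarantees. A minimal client-side sketch of the call that produces such a procedure follows; the table name matches the log, everything else (configuration, connection handling) is illustrative.

    // Minimal sketch of an administrative flush request; only the table name
    // is taken from the log, the rest is an assumed setup.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a flush of the table on the master and waits for the
                // resulting procedure to complete, as with procId 108/110 above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }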
2024-11-23T03:22:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:37,935 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:37,935 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:37,935 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:37,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742239_1415 (size=12527) 2024-11-23T03:22:37,970 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/385509cb6ff74740934d43cd1c190fd4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/385509cb6ff74740934d43cd1c190fd4 2024-11-23T03:22:37,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742240_1416 (size=4469) 2024-11-23T03:22:37,978 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into 385509cb6ff74740934d43cd1c190fd4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
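Earlier in this section the client logged "Call exception, tries=6, retries=16, started=4192 ms ago" while retrying a put against the blocked region. The retry budget and pacing come from client-side configuration; the sketch below shows the standard keys involved, with assumed values (16 retries matches the log, the pause and operation timeout are illustrative, not taken from this test).

    // Client-side retry settings; 16 retries matches the "retries=16" in the
    // log, the remaining values are illustrative assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClientRetryTuning {
        public static Configuration clientConf() {
            Configuration conf = HBaseConfiguration.create();
            // Number of retries before RpcRetryingCaller gives up on an operation.
            conf.setInt("hbase.client.retries.number", 16);
            // Base pause in milliseconds between retries (grows with backoff).
            conf.setLong("hbase.client.pause", 100);
            // Upper bound in milliseconds for one client operation including retries.
            conf.setLong("hbase.client.operation.timeout", 60000);
            return conf;
        }
    }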
2024-11-23T03:22:37,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:37,978 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=12, startTime=1732332157898; duration=0sec 2024-11-23T03:22:37,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:37,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:37,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:37,981 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:37,981 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:37,981 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:37,981 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b97e2ad3e6a048d89f8657c2227f0050, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=47.5 K 2024-11-23T03:22:37,982 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b97e2ad3e6a048d89f8657c2227f0050, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732332153254 2024-11-23T03:22:37,982 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fa380397b0a41958385f27d2e5aace0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732332153398 2024-11-23T03:22:37,984 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b5feae61fa648028d78cec561c47427, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=157, earliestPutTs=1732332154089 2024-11-23T03:22:37,984 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e83c977efb8045db95f5ba58c0fbfc28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:38,001 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#355 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:38,002 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/5af9545ee00047f8b1d30d770693f00f is 50, key is test_row_0/C:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:38,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742241_1417 (size=12527) 2024-11-23T03:22:38,047 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/5af9545ee00047f8b1d30d770693f00f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/5af9545ee00047f8b1d30d770693f00f 2024-11-23T03:22:38,056 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into 5af9545ee00047f8b1d30d770693f00f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:38,057 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:38,057 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=12, startTime=1732332157913; duration=0sec 2024-11-23T03:22:38,057 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:38,057 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:38,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T03:22:38,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:38,089 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:38,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:38,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b73916ebee504f0d894bf16ee7d40d55_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332157000/Put/seqid=0 2024-11-23T03:22:38,128 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742242_1418 (size=12304) 2024-11-23T03:22:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:38,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:38,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332218160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332218161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332218162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332218164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:38,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332218272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332218273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332218274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T03:22:38,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332218278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,377 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#353 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:38,378 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/7ab0233907174588a42e7d1cb5cf53df is 175, key is test_row_0/A:col10/1732332156909/Put/seqid=0 2024-11-23T03:22:38,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742243_1419 (size=31481) 2024-11-23T03:22:38,400 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/7ab0233907174588a42e7d1cb5cf53df as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df 2024-11-23T03:22:38,405 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into 7ab0233907174588a42e7d1cb5cf53df(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:38,405 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:38,405 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=12, startTime=1732332157888; duration=0sec 2024-11-23T03:22:38,406 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:38,406 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:38,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332218475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332218476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332218481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332218488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:38,534 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b73916ebee504f0d894bf16ee7d40d55_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b73916ebee504f0d894bf16ee7d40d55_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:38,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/63d4f99687c24f0985ceb5b6cb13eb31, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:38,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/63d4f99687c24f0985ceb5b6cb13eb31 is 175, key is test_row_0/A:col10/1732332157000/Put/seqid=0 2024-11-23T03:22:38,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38243 is added to blk_1073742244_1420 (size=31105) 2024-11-23T03:22:38,585 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/63d4f99687c24f0985ceb5b6cb13eb31 2024-11-23T03:22:38,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/379ad240c0714dd282a0315d1d27ad16 is 50, key is test_row_0/B:col10/1732332157000/Put/seqid=0 2024-11-23T03:22:38,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742245_1421 (size=12151) 2024-11-23T03:22:38,649 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/379ad240c0714dd282a0315d1d27ad16 2024-11-23T03:22:38,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a4a15e3134e741a998d40a425792a024 is 50, key is test_row_0/C:col10/1732332157000/Put/seqid=0 2024-11-23T03:22:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742246_1422 (size=12151) 2024-11-23T03:22:38,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332218783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332218784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332218791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:38,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332218793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:39,086 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a4a15e3134e741a998d40a425792a024 2024-11-23T03:22:39,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/63d4f99687c24f0985ceb5b6cb13eb31 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31 2024-11-23T03:22:39,096 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31, entries=150, sequenceid=196, filesize=30.4 K 2024-11-23T03:22:39,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/379ad240c0714dd282a0315d1d27ad16 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16 2024-11-23T03:22:39,112 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T03:22:39,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a4a15e3134e741a998d40a425792a024 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024 2024-11-23T03:22:39,117 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T03:22:39,118 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 73db48321d40b906ec9d02fc18076043 in 1029ms, sequenceid=196, compaction requested=false 2024-11-23T03:22:39,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:39,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:39,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-23T03:22:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-23T03:22:39,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-23T03:22:39,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1850 sec 2024-11-23T03:22:39,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.1890 sec 2024-11-23T03:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:39,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:22:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:39,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:39,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,296 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:39,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123031f454e18f04e599e82adb9e26df623_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:39,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742247_1423 (size=12304) 2024-11-23T03:22:39,325 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:39,329 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123031f454e18f04e599e82adb9e26df623_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123031f454e18f04e599e82adb9e26df623_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:39,330 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/c2380244171c44a9b9a229ded7cb7b42, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:39,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/c2380244171c44a9b9a229ded7cb7b42 is 175, key is test_row_0/A:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:39,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332219329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332219336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332219338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332219339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742248_1424 (size=31105) 2024-11-23T03:22:39,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332219440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332219446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332219446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332219448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332219649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332219655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332219656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:39,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332219658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:39,769 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/c2380244171c44a9b9a229ded7cb7b42 2024-11-23T03:22:39,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/170e2b3fc10f4841b2fcd4dbfc3ace3e is 50, key is test_row_0/B:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742249_1425 (size=12151) 2024-11-23T03:22:39,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/170e2b3fc10f4841b2fcd4dbfc3ace3e 2024-11-23T03:22:39,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/cc70dc4d268547068ba433c82b8c432c is 50, key is test_row_0/C:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:39,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742250_1426 (size=12151) 2024-11-23T03:22:39,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/cc70dc4d268547068ba433c82b8c432c 2024-11-23T03:22:39,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/c2380244171c44a9b9a229ded7cb7b42 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42 2024-11-23T03:22:39,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42, entries=150, sequenceid=213, filesize=30.4 K 2024-11-23T03:22:39,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/170e2b3fc10f4841b2fcd4dbfc3ace3e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e 2024-11-23T03:22:39,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e, entries=150, sequenceid=213, filesize=11.9 K 2024-11-23T03:22:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/cc70dc4d268547068ba433c82b8c432c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c 2024-11-23T03:22:39,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c, entries=150, sequenceid=213, filesize=11.9 K 2024-11-23T03:22:39,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 73db48321d40b906ec9d02fc18076043 in 598ms, sequenceid=213, compaction requested=true 2024-11-23T03:22:39,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:39,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:39,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:39,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:39,894 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:39,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:39,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:39,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:22:39,894 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:39,896 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:39,896 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:39,896 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:39,896 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/385509cb6ff74740934d43cd1c190fd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.0 K 2024-11-23T03:22:39,896 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:39,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:39,897 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
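The flush recorded just above wrote roughly 87 KB across the A/B/C stores and immediately queued minor compactions for all three; a few entries further down the test also drives a table-wide flush through the master (the FLUSH procedure for TestAcidGuarantees, pid=112). Purely as an illustrative sketch of how such a request is issued with the standard HBase Admin API, assuming a reachable cluster configured via hbase-site.xml; the class name and the follow-up majorCompact call are my additions, not part of the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);         // asks the master to flush every region of the table (the FLUSH procedure seen in this log)
      admin.majorCompact(table);  // optionally request a major compaction of the flushed store files
    }
  }
}

In this run the equivalent request shows up below as FlushTableProcedure pid=112 with a FlushRegionProcedure subprocedure (pid=113) dispatched to the region server and executed by FlushRegionCallable.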
2024-11-23T03:22:39,897 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=91.5 K 2024-11-23T03:22:39,897 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:39,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42] 2024-11-23T03:22:39,897 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 385509cb6ff74740934d43cd1c190fd4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:39,897 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ab0233907174588a42e7d1cb5cf53df, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:39,898 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 379ad240c0714dd282a0315d1d27ad16, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332156998 2024-11-23T03:22:39,898 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63d4f99687c24f0985ceb5b6cb13eb31, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332156998 2024-11-23T03:22:39,898 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 170e2b3fc10f4841b2fcd4dbfc3ace3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:39,899 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2380244171c44a9b9a229ded7cb7b42, keycount=150, bloomtype=ROW, size=30.4 
K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:39,912 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:39,922 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:39,923 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/1bad0c3d830249b38b0435ea011731ba is 50, key is test_row_0/B:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:39,937 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411234f66839bfa51476c9e67a79af1f043ef_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:39,939 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411234f66839bfa51476c9e67a79af1f043ef_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:39,939 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234f66839bfa51476c9e67a79af1f043ef_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:39,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:39,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38243 is added to blk_1073742251_1427 (size=12629) 2024-11-23T03:22:39,972 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/1bad0c3d830249b38b0435ea011731ba as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/1bad0c3d830249b38b0435ea011731ba 2024-11-23T03:22:39,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742252_1428 (size=4469) 2024-11-23T03:22:39,983 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into 1bad0c3d830249b38b0435ea011731ba(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:39,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:39,983 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=13, startTime=1732332159893; duration=0sec 2024-11-23T03:22:39,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:39,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:39,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:39,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238796499234d743148dcbadc8e483b7ae_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332159962/Put/seqid=0 2024-11-23T03:22:39,984 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:39,985 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:39,998 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:39,998 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/5af9545ee00047f8b1d30d770693f00f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.0 K 2024-11-23T03:22:39,998 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5af9545ee00047f8b1d30d770693f00f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732332156282 2024-11-23T03:22:39,999 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a4a15e3134e741a998d40a425792a024, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332156998 2024-11-23T03:22:39,999 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cc70dc4d268547068ba433c82b8c432c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:40,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332220001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332220008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,019 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:40,019 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/212697ff61784ffca85b1667f789426c is 50, key is test_row_0/C:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:40,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332220011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332220013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742253_1429 (size=14794) 2024-11-23T03:22:40,026 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:40,032 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238796499234d743148dcbadc8e483b7ae_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238796499234d743148dcbadc8e483b7ae_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:40,032 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/77b1b9f0357748859a6f11c43c48c3dc, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:40,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/77b1b9f0357748859a6f11c43c48c3dc is 175, key is test_row_0/A:col10/1732332159962/Put/seqid=0 2024-11-23T03:22:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T03:22:40,040 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-23T03:22:40,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-23T03:22:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T03:22:40,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:40,045 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:40,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:40,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742254_1430 (size=12629) 2024-11-23T03:22:40,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742255_1431 (size=39749) 2024-11-23T03:22:40,062 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/77b1b9f0357748859a6f11c43c48c3dc 2024-11-23T03:22:40,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/04c7fe7ec07c4826a7edd72a6a86c1a1 is 50, key is test_row_0/B:col10/1732332159962/Put/seqid=0 2024-11-23T03:22:40,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742256_1432 (size=12151) 2024-11-23T03:22:40,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/04c7fe7ec07c4826a7edd72a6a86c1a1 2024-11-23T03:22:40,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/c51a7932f2ed4f5bafb5a3401887e1cb is 50, key is test_row_0/C:col10/1732332159962/Put/seqid=0 2024-11-23T03:22:40,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742257_1433 (size=12151) 2024-11-23T03:22:40,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/c51a7932f2ed4f5bafb5a3401887e1cb 2024-11-23T03:22:40,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/77b1b9f0357748859a6f11c43c48c3dc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc 2024-11-23T03:22:40,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332220116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332220118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc, entries=200, sequenceid=234, filesize=38.8 K 2024-11-23T03:22:40,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/04c7fe7ec07c4826a7edd72a6a86c1a1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1 2024-11-23T03:22:40,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1, entries=150, sequenceid=234, filesize=11.9 K 2024-11-23T03:22:40,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/c51a7932f2ed4f5bafb5a3401887e1cb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb 2024-11-23T03:22:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332220123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332220123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb, entries=150, sequenceid=234, filesize=11.9 K 2024-11-23T03:22:40,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 73db48321d40b906ec9d02fc18076043 in 172ms, sequenceid=234, compaction requested=false 2024-11-23T03:22:40,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T03:22:40,198 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T03:22:40,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
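The repeated RegionTooBusyException warnings above show writers being rejected because the region's memstore has exceeded its blocking limit (512.0 K in this test run). The HBase client normally retries such failures internally, but as a hedged illustration of the same idea at the application level, a minimal sketch might look like the following; the table name and row match the log, while the column value, retry count, and backoff are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          long backoffMs = 100;
          while (true) {
            try {
              table.put(put);          // write one cell to family A
              break;
            } catch (RegionTooBusyException e) {
              // The region is over its memstore blocking limit; back off and retry.
              if (++attempts >= 5) {
                throw e;               // give up after a few attempts
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

Whether the exception reaches application code directly or arrives wrapped in a retries-exhausted exception depends on client configuration, so a production handler would typically inspect the cause chain as well.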
2024-11-23T03:22:40,199 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:22:40,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:40,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:40,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb96368d1b8441f9be4fdb9254b00482_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332160011/Put/seqid=0 2024-11-23T03:22:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742258_1434 (size=12304) 2024-11-23T03:22:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:40,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T03:22:40,379 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#362 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:40,380 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/8c86c3f51241416695fe0468e8a0cfa1 is 175, key is test_row_0/A:col10/1732332158149/Put/seqid=0 2024-11-23T03:22:40,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332220368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332220369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332220370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332220370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742259_1435 (size=31583) 2024-11-23T03:22:40,419 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/8c86c3f51241416695fe0468e8a0cfa1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1 2024-11-23T03:22:40,425 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into 8c86c3f51241416695fe0468e8a0cfa1(size=30.8 K), total size for store is 69.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
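The entries above show the short-compactions thread finishing a compaction of store A under the pressure-aware throughput controller and committing the result from .tmp into the store. As a hedged sketch of how a compaction can be requested and observed from the client side, assuming the standard Admin API (the table name matches the test; the polling interval is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table);                    // ask every region to compact all stores
          CompactionState state;
          do {
            Thread.sleep(1000);                         // illustrative polling interval
            state = admin.getCompactionState(table);    // NONE once no store is compacting
          } while (state != CompactionState.NONE);
        }
      }
    }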
2024-11-23T03:22:40,425 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:40,425 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=13, startTime=1732332159893; duration=0sec 2024-11-23T03:22:40,425 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:40,425 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:40,467 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/212697ff61784ffca85b1667f789426c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/212697ff61784ffca85b1667f789426c 2024-11-23T03:22:40,473 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into 212697ff61784ffca85b1667f789426c(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:40,473 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:40,473 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=13, startTime=1732332159894; duration=0sec 2024-11-23T03:22:40,473 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:40,473 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:40,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332220483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332220484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332220486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332220486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:40,621 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb96368d1b8441f9be4fdb9254b00482_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb96368d1b8441f9be4fdb9254b00482_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:40,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cb7742e0c2034a9c839b70ccf9421e2d, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:40,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cb7742e0c2034a9c839b70ccf9421e2d is 175, key is test_row_0/A:col10/1732332160011/Put/seqid=0 2024-11-23T03:22:40,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T03:22:40,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742260_1436 (size=31105) 2024-11-23T03:22:40,666 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cb7742e0c2034a9c839b70ccf9421e2d 
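The DefaultMobStoreFlusher and mobdir entries above indicate that column family A is MOB-enabled in this test, so flushed values above the MOB threshold land in a separate mob file that is then renamed into the mob data directory. A rough sketch of declaring such a schema, assuming the HBase 2.x descriptor builders; the threshold value and the choice to keep B and C as plain families are assumptions based only on what this log shows:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family A stores large values in MOB files, as the mobdir flushes above suggest.
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(4L)        // hypothetical threshold, in bytes
              .build());
          // B and C are assumed here to be ordinary families.
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(builder.build());
        }
      }
    }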
2024-11-23T03:22:40,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/b779aa83901b4bb7b4e36627fead91a8 is 50, key is test_row_0/B:col10/1732332160011/Put/seqid=0 2024-11-23T03:22:40,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332220690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332220691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332220691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332220697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:40,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742261_1437 (size=12151) 2024-11-23T03:22:40,715 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/b779aa83901b4bb7b4e36627fead91a8 2024-11-23T03:22:40,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4267f5c3b1f14312a28012954aa7c7dc is 50, key is test_row_0/C:col10/1732332160011/Put/seqid=0 2024-11-23T03:22:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742262_1438 (size=12151) 2024-11-23T03:22:40,763 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4267f5c3b1f14312a28012954aa7c7dc 2024-11-23T03:22:40,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/cb7742e0c2034a9c839b70ccf9421e2d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d 2024-11-23T03:22:40,780 
INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d, entries=150, sequenceid=250, filesize=30.4 K 2024-11-23T03:22:40,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/b779aa83901b4bb7b4e36627fead91a8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8 2024-11-23T03:22:40,784 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8, entries=150, sequenceid=250, filesize=11.9 K 2024-11-23T03:22:40,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/4267f5c3b1f14312a28012954aa7c7dc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc 2024-11-23T03:22:40,789 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc, entries=150, sequenceid=250, filesize=11.9 K 2024-11-23T03:22:40,790 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 73db48321d40b906ec9d02fc18076043 in 591ms, sequenceid=250, compaction requested=true 2024-11-23T03:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
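With the flush finished at sequenceid=250, the remote FlushRegionProcedure (pid=113) reports success below and its parent FlushTableProcedure (pid=112) completes, which is what the repeated "Checking to see if procedure is done pid=112" polls were waiting on. On the client side such a table flush is typically initiated through the Admin API; a minimal sketch, with the connection setup being illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The master schedules a flush for every region of the table; in recent HBase
          // versions the call returns once the procedure framework reports it done.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }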
2024-11-23T03:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-23T03:22:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-23T03:22:40,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-23T03:22:40,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 747 msec 2024-11-23T03:22:40,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 753 msec 2024-11-23T03:22:41,007 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42] to archive 2024-11-23T03:22:41,012 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:41,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:22:41,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:41,015 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/3fe34eb55ef145729d16839426d5a3d5 2024-11-23T03:22:41,017 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/b02356e46285424eabd438aab7120f29 2024-11-23T03:22:41,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:41,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:41,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:41,018 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/29fb037c620e4383b99ffbdc703e8d52 2024-11-23T03:22:41,023 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/69925669b1b44a51b85b13916c45d056 2024-11-23T03:22:41,024 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f3e879c264c24102b54c4b742c67f90f 2024-11-23T03:22:41,026 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fc753a5ae6c442bc9a23cc953cca0e41 2024-11-23T03:22:41,027 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cafaa984195644ff806c487ed9fbb805 2024-11-23T03:22:41,028 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ad34f09425ef479ea982036733445770 2024-11-23T03:22:41,030 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/f30fb67a45144e5d856747268eccb026 2024-11-23T03:22:41,031 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/59d052af0ad445eca970d5b4e76c7716 2024-11-23T03:22:41,033 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/7ab0233907174588a42e7d1cb5cf53df 2024-11-23T03:22:41,035 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/fb563f6576dc4e55bfb43c65e7fcde4b 2024-11-23T03:22:41,040 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/63d4f99687c24f0985ceb5b6cb13eb31 2024-11-23T03:22:41,046 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/c2380244171c44a9b9a229ded7cb7b42 2024-11-23T03:22:41,049 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/02df864f4fb24e3c92e299c8fb128c7b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/db80d8ef6b25491dad13fdf79bd3a85b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/385509cb6ff74740934d43cd1c190fd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e] to archive 2024-11-23T03:22:41,051 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
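The compacted-files discharger is moving store files that were replaced by the earlier compactions into the archive directory tree, one "Archived from FileableStoreFile" entry per file. If one wanted to inspect what ended up in the archive, a small sketch using the Hadoop FileSystem API would do; the root path below is copied from this log, and the region/family/hfile layout follows the Archived entries:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive root copied from this log; normally it is derived from hbase.rootdir.
        Path archive = new Path("hdfs://localhost:34981/user/jenkins/test-data/"
            + "a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees");
        FileSystem fs = archive.getFileSystem(conf);
        for (FileStatus region : fs.listStatus(archive)) {             // one directory per region
          for (FileStatus family : fs.listStatus(region.getPath())) {  // one directory per family
            for (FileStatus hfile : fs.listStatus(family.getPath())) {
              System.out.println(hfile.getPath() + "\t" + hfile.getLen() + " bytes");
            }
          }
        }
      }
    }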
2024-11-23T03:22:41,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123be97a97e7bb04249be1f257641d7ebba_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:41,053 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/78f5c04b717843a4b523cfe3707968f2 2024-11-23T03:22:41,055 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/0c2e0031cc56497294774c94ff56fb23 2024-11-23T03:22:41,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/24c6ca012e1a41fea8a58271ba4805d7 2024-11-23T03:22:41,058 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/02df864f4fb24e3c92e299c8fb128c7b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/02df864f4fb24e3c92e299c8fb128c7b 2024-11-23T03:22:41,061 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/350f6d99fe9d4f66a32195ce9110fde1 2024-11-23T03:22:41,065 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/745e395aeae04d78b8742ffca77ceee2 2024-11-23T03:22:41,067 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/db80d8ef6b25491dad13fdf79bd3a85b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/db80d8ef6b25491dad13fdf79bd3a85b 2024-11-23T03:22:41,068 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5d7885a27a4e70847b1fd3845447c2 2024-11-23T03:22:41,069 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/bb78aed3aedf4ba9b621629d05e4f7a8 2024-11-23T03:22:41,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332221057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,071 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/10dd465209e34ea59fc84dab259728fb 2024-11-23T03:22:41,072 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/385509cb6ff74740934d43cd1c190fd4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/385509cb6ff74740934d43cd1c190fd4 2024-11-23T03:22:41,074 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/721bff5de03f42b5b553f6c6a1cd1355 2024-11-23T03:22:41,075 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/379ad240c0714dd282a0315d1d27ad16 2024-11-23T03:22:41,076 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/170e2b3fc10f4841b2fcd4dbfc3ace3e 2024-11-23T03:22:41,078 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/636e58263d8c4d80b9303aefaf8f768f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b97e2ad3e6a048d89f8657c2227f0050, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/5af9545ee00047f8b1d30d770693f00f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c] to archive 2024-11-23T03:22:41,080 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:41,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332221069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332221070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332221071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,084 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/f559fed2a9d94ad4a129a0960bc10841 2024-11-23T03:22:41,085 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/8d257459708d4c358aa5ac4ed580370f 2024-11-23T03:22:41,086 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/96a7eb2826374e51aa2d5650145e532f 2024-11-23T03:22:41,087 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/636e58263d8c4d80b9303aefaf8f768f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/636e58263d8c4d80b9303aefaf8f768f 2024-11-23T03:22:41,088 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ebb6584c514745a49e1c12861b87404c 2024-11-23T03:22:41,089 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/7189decdf8db4468ad8aa941e256f9ef 2024-11-23T03:22:41,090 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b97e2ad3e6a048d89f8657c2227f0050 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b97e2ad3e6a048d89f8657c2227f0050 2024-11-23T03:22:41,092 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/b1d600c5bb9f4dcfa6a02f13bdcd554b 2024-11-23T03:22:41,093 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1fa380397b0a41958385f27d2e5aace0 2024-11-23T03:22:41,094 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4b5feae61fa648028d78cec561c47427 2024-11-23T03:22:41,095 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/5af9545ee00047f8b1d30d770693f00f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/5af9545ee00047f8b1d30d770693f00f 2024-11-23T03:22:41,096 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e83c977efb8045db95f5ba58c0fbfc28 2024-11-23T03:22:41,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742263_1439 (size=14994) 2024-11-23T03:22:41,098 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a4a15e3134e741a998d40a425792a024 2024-11-23T03:22:41,103 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0d51875c74df:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/cc70dc4d268547068ba433c82b8c432c 2024-11-23T03:22:41,103 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:41,108 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123be97a97e7bb04249be1f257641d7ebba_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123be97a97e7bb04249be1f257641d7ebba_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:41,109 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/6f8884fc3370452c9435959a02761dd4, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:41,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/6f8884fc3370452c9435959a02761dd4 is 175, key is test_row_0/A:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:41,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T03:22:41,149 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-23T03:22:41,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:41,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-23T03:22:41,152 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:41,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:41,153 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:41,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742264_1440 (size=39949) 2024-11-23T03:22:41,164 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/6f8884fc3370452c9435959a02761dd4 2024-11-23T03:22:41,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332221171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/f0c4be9fddbd414fb4ed3397ef5a9ddb is 50, key is test_row_0/B:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:41,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332221183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332221183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332221184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742265_1441 (size=12301) 2024-11-23T03:22:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:41,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:41,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:41,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332221382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332221390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332221390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332221390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:41,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:41,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:41,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60554 deadline: 1732332221611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,617 DEBUG [Thread-1664 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8236 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:22:41,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:41,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:41,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:41,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/f0c4be9fddbd414fb4ed3397ef5a9ddb 2024-11-23T03:22:41,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d57d936e0b154ebaa9babe06a0f3e5a7 is 50, key is test_row_0/C:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742266_1442 (size=12301) 2024-11-23T03:22:41,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332221699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332221705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332221706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:41,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332221707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:41,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:41,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:41,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:41,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:41,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:41,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:41,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:41,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d57d936e0b154ebaa9babe06a0f3e5a7 2024-11-23T03:22:42,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/6f8884fc3370452c9435959a02761dd4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4 2024-11-23T03:22:42,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4, entries=200, sequenceid=276, filesize=39.0 K 2024-11-23T03:22:42,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/f0c4be9fddbd414fb4ed3397ef5a9ddb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb 2024-11-23T03:22:42,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb, entries=150, 
sequenceid=276, filesize=12.0 K 2024-11-23T03:22:42,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d57d936e0b154ebaa9babe06a0f3e5a7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7 2024-11-23T03:22:42,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T03:22:42,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 73db48321d40b906ec9d02fc18076043 in 1106ms, sequenceid=276, compaction requested=true 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:42,120 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:42,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:42,120 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:42,123 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:42,123 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:42,123 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in 
TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,124 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/1bad0c3d830249b38b0435ea011731ba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=48.1 K 2024-11-23T03:22:42,124 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bad0c3d830249b38b0435ea011731ba, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:42,124 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142386 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:42,124 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:42,124 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,125 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=139.0 K 2024-11-23T03:22:42,125 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:42,125 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4] 2024-11-23T03:22:42,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 04c7fe7ec07c4826a7edd72a6a86c1a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732332159334 2024-11-23T03:22:42,125 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c86c3f51241416695fe0468e8a0cfa1, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:42,126 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77b1b9f0357748859a6f11c43c48c3dc, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732332159324 2024-11-23T03:22:42,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b779aa83901b4bb7b4e36627fead91a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732332159999 2024-11-23T03:22:42,126 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb7742e0c2034a9c839b70ccf9421e2d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732332159999 2024-11-23T03:22:42,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f0c4be9fddbd414fb4ed3397ef5a9ddb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:42,127 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f8884fc3370452c9435959a02761dd4, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:42,146 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#374 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:42,146 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/a054489c694242b7a7ef8210225896c9 is 50, key is test_row_0/B:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:42,153 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,168 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123900f5082ba9849b295ffd79291c491e5_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,170 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123900f5082ba9849b295ffd79291c491e5_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,171 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123900f5082ba9849b295ffd79291c491e5_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742268_1444 (size=4469) 2024-11-23T03:22:42,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742267_1443 (size=12439) 2024-11-23T03:22:42,216 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#375 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:42,217 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ce4a0bc4a0074de09d9a030bf288975b is 175, key is test_row_0/A:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:42,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:22:42,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:42,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:42,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:42,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,231 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/a054489c694242b7a7ef8210225896c9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/a054489c694242b7a7ef8210225896c9 2024-11-23T03:22:42,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:42,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,238 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into a054489c694242b7a7ef8210225896c9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:42,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:42,238 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=12, startTime=1732332162120; duration=0sec 2024-11-23T03:22:42,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:42,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:42,238 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:22:42,240 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:22:42,240 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:42,240 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,240 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/212697ff61784ffca85b1667f789426c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=48.1 K 2024-11-23T03:22:42,241 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 212697ff61784ffca85b1667f789426c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732332158149 2024-11-23T03:22:42,241 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c51a7932f2ed4f5bafb5a3401887e1cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732332159334 2024-11-23T03:22:42,242 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4267f5c3b1f14312a28012954aa7c7dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=250, earliestPutTs=1732332159999 2024-11-23T03:22:42,242 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d57d936e0b154ebaa9babe06a0f3e5a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:42,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742269_1445 (size=31393) 2024-11-23T03:22:42,247 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/ce4a0bc4a0074de09d9a030bf288975b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b 2024-11-23T03:22:42,252 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into ce4a0bc4a0074de09d9a030bf288975b(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:42,252 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:42,252 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=12, startTime=1732332162120; duration=0sec 2024-11-23T03:22:42,252 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:42,252 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:42,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233621ce3ca8c74a8f89c033cbc5cabec7_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332162218/Put/seqid=0 2024-11-23T03:22:42,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:42,257 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:42,257 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/1364f6176ae349a2bcd57bbc481f2f4c is 50, key is test_row_0/C:col10/1732332161012/Put/seqid=0 2024-11-23T03:22:42,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742271_1447 (size=12454) 2024-11-23T03:22:42,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332222283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332222284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332222284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742270_1446 (size=12439) 2024-11-23T03:22:42,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332222285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332222392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332222393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332222394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332222395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332222598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332222604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332222604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332222606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,668 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:42,674 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233621ce3ca8c74a8f89c033cbc5cabec7_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233621ce3ca8c74a8f89c033cbc5cabec7_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:42,682 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/765c3821b66440de8a2dba27ab5ab3de, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/765c3821b66440de8a2dba27ab5ab3de is 175, key is test_row_0/A:col10/1732332162218/Put/seqid=0 2024-11-23T03:22:42,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742272_1448 (size=31255) 2024-11-23T03:22:42,701 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, 
sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/765c3821b66440de8a2dba27ab5ab3de 2024-11-23T03:22:42,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,704 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/1364f6176ae349a2bcd57bbc481f2f4c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1364f6176ae349a2bcd57bbc481f2f4c 2024-11-23T03:22:42,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into 1364f6176ae349a2bcd57bbc481f2f4c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:42,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:42,713 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=12, startTime=1732332162120; duration=0sec 2024-11-23T03:22:42,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:42,713 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:42,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/168e6f9afe3f4ab0a25258c5423b8978 is 50, key is test_row_0/B:col10/1732332162218/Put/seqid=0 2024-11-23T03:22:42,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742273_1449 (size=12301) 2024-11-23T03:22:42,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/168e6f9afe3f4ab0a25258c5423b8978 2024-11-23T03:22:42,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/721ee8dc97c94a1a87cae0ed494d4089 is 50, key is test_row_0/C:col10/1732332162218/Put/seqid=0 2024-11-23T03:22:42,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742274_1450 (size=12301) 2024-11-23T03:22:42,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/721ee8dc97c94a1a87cae0ed494d4089 2024-11-23T03:22:42,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/765c3821b66440de8a2dba27ab5ab3de as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de 2024-11-23T03:22:42,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de, entries=150, sequenceid=291, filesize=30.5 K 2024-11-23T03:22:42,851 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/168e6f9afe3f4ab0a25258c5423b8978 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978 2024-11-23T03:22:42,855 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:42,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:42,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:42,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
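
From this point in the log the writers start getting throttled: the region's memstore has crossed its blocking limit (512.0 K in this test configuration), so incoming puts are rejected with RegionTooBusyException until the flush drains the memstore. The exception is transient and retryable; a minimal sketch of an explicit retry loop follows. It assumes client-side retries are tuned low enough for the server exception to reach application code (normally the HBase client retries this internally), and the row/family/qualifier names only mirror the test_row_0/A:col10 keys seen in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break;  // write went through
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit (the WARN entries that
          // follow); wait for the flush to catch up, then retry with growing backoff.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}
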
2024-11-23T03:22:42,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978, entries=150, sequenceid=291, filesize=12.0 K 2024-11-23T03:22:42,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/721ee8dc97c94a1a87cae0ed494d4089 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089 2024-11-23T03:22:42,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089, entries=150, sequenceid=291, filesize=12.0 K 2024-11-23T03:22:42,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 73db48321d40b906ec9d02fc18076043 in 644ms, sequenceid=291, compaction requested=false 2024-11-23T03:22:42,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:42,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:22:42,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:42,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:42,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:42,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:42,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123259e3135a70f40dcb3211a7c74285d24_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332222929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742275_1451 (size=12454) 2024-11-23T03:22:42,941 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332222935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,945 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123259e3135a70f40dcb3211a7c74285d24_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123259e3135a70f40dcb3211a7c74285d24_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:42,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332222939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,947 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/739633286c8d432daf49f1dad0930eef, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:42,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332222939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:42,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/739633286c8d432daf49f1dad0930eef is 175, key is test_row_0/A:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:42,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742276_1452 (size=31255) 2024-11-23T03:22:43,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332223040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332223043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332223049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332223049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,163 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332223249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332223249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332223258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332223258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,315 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,354 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/739633286c8d432daf49f1dad0930eef 2024-11-23T03:22:43,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5eeb113d9b447191a90661501c049a is 50, key is test_row_0/B:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742277_1453 (size=12301) 2024-11-23T03:22:43,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332223559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332223559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332223566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:43,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332223568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:43,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,773 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:43,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5eeb113d9b447191a90661501c049a 2024-11-23T03:22:43,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ddab2bd322954ba591b09ed1dc1e20b3 is 50, key is test_row_0/C:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:43,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742278_1454 (size=12301) 2024-11-23T03:22:43,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:43,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:43,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:43,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:44,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:44,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332224062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:44,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332224064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332224071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332224072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:44,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ddab2bd322954ba591b09ed1dc1e20b3 2024-11-23T03:22:44,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/739633286c8d432daf49f1dad0930eef as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef 2024-11-23T03:22:44,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef, entries=150, sequenceid=316, filesize=30.5 K 2024-11-23T03:22:44,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ef5eeb113d9b447191a90661501c049a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a 2024-11-23T03:22:44,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a, entries=150, sequenceid=316, filesize=12.0 K 2024-11-23T03:22:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/ddab2bd322954ba591b09ed1dc1e20b3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3 2024-11-23T03:22:44,232 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:44,232 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3, entries=150, sequenceid=316, filesize=12.0 K 2024-11-23T03:22:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:44,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 73db48321d40b906ec9d02fc18076043 in 1326ms, sequenceid=316, compaction requested=true 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:44,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:44,234 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:44,234 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:44,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:44,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93903 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:44,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:44,235 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:44,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:44,235 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=91.7 K 2024-11-23T03:22:44,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:44,235 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,235 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef] 2024-11-23T03:22:44,235 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/a054489c694242b7a7ef8210225896c9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.2 K 2024-11-23T03:22:44,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce4a0bc4a0074de09d9a030bf288975b, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:44,236 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a054489c694242b7a7ef8210225896c9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:44,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 765c3821b66440de8a2dba27ab5ab3de, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732332161067 2024-11-23T03:22:44,236 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 168e6f9afe3f4ab0a25258c5423b8978, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732332161067 2024-11-23T03:22:44,237 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 739633286c8d432daf49f1dad0930eef, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:44,237 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ef5eeb113d9b447191a90661501c049a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:44,245 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#383 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:44,245 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/517087c285bf4f0f92646d64ee894e08 is 50, key is test_row_0/B:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:44,246 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:44,247 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411234478699927bc44ea82f70d4065005ae6_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:44,249 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411234478699927bc44ea82f70d4065005ae6_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:44,249 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234478699927bc44ea82f70d4065005ae6_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:44,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742279_1455 (size=12541) 2024-11-23T03:22:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742280_1456 (size=4469) 2024-11-23T03:22:44,258 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#384 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:44,258 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/82a6105483ad4fa987ad529eab78a635 is 175, key is test_row_0/A:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:44,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742281_1457 (size=31495) 2024-11-23T03:22:44,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:44,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T03:22:44,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:44,386 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:22:44,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112309bea810781449348de16aa473dc1eed_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332162926/Put/seqid=0 2024-11-23T03:22:44,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742282_1458 (size=12454) 2024-11-23T03:22:44,657 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/517087c285bf4f0f92646d64ee894e08 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/517087c285bf4f0f92646d64ee894e08 2024-11-23T03:22:44,661 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into 517087c285bf4f0f92646d64ee894e08(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:44,661 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:44,661 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=13, startTime=1732332164234; duration=0sec 2024-11-23T03:22:44,662 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:44,662 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:44,662 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:44,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:44,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:44,663 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:44,663 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1364f6176ae349a2bcd57bbc481f2f4c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.2 K 2024-11-23T03:22:44,663 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1364f6176ae349a2bcd57bbc481f2f4c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332160368 2024-11-23T03:22:44,664 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 721ee8dc97c94a1a87cae0ed494d4089, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732332161067 2024-11-23T03:22:44,664 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ddab2bd322954ba591b09ed1dc1e20b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:44,666 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/82a6105483ad4fa987ad529eab78a635 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635 2024-11-23T03:22:44,670 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into 82a6105483ad4fa987ad529eab78a635(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:44,670 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:44,671 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=13, startTime=1732332164234; duration=0sec 2024-11-23T03:22:44,671 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:44,671 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:44,672 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:44,672 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/e2ae704667cd4ba798617a96309219d4 is 50, key is test_row_0/C:col10/1732332162282/Put/seqid=0 2024-11-23T03:22:44,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742283_1459 (size=12541) 2024-11-23T03:22:44,693 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/e2ae704667cd4ba798617a96309219d4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e2ae704667cd4ba798617a96309219d4 2024-11-23T03:22:44,699 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into e2ae704667cd4ba798617a96309219d4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:44,699 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:44,699 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=13, startTime=1732332164234; duration=0sec 2024-11-23T03:22:44,699 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:44,699 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:44,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:44,804 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112309bea810781449348de16aa473dc1eed_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309bea810781449348de16aa473dc1eed_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:44,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/883cc19b7c854628a1f445db92c0c0d6, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:44,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/883cc19b7c854628a1f445db92c0c0d6 is 175, key is test_row_0/A:col10/1732332162926/Put/seqid=0 2024-11-23T03:22:44,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742284_1460 (size=31255) 2024-11-23T03:22:44,823 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/883cc19b7c854628a1f445db92c0c0d6 2024-11-23T03:22:44,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/e139fb99c7434a70ada3fbd78004f93e is 50, key is test_row_0/B:col10/1732332162926/Put/seqid=0 2024-11-23T03:22:44,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742285_1461 (size=12301) 2024-11-23T03:22:45,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:45,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. as already flushing 2024-11-23T03:22:45,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332225097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332225100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332225101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332225102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332225203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332225206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332225206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332225206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,239 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/e139fb99c7434a70ada3fbd78004f93e 2024-11-23T03:22:45,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/62e078a56cd248b4afb131ffdeaf040b is 50, key is test_row_0/C:col10/1732332162926/Put/seqid=0 2024-11-23T03:22:45,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:45,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742286_1462 (size=12301) 2024-11-23T03:22:45,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60616 deadline: 1732332225406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60566 deadline: 1732332225410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60548 deadline: 1732332225410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:45,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60596 deadline: 1732332225411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:45,513 DEBUG [Thread-1673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:61411 2024-11-23T03:22:45,514 DEBUG [Thread-1673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,514 DEBUG [Thread-1675 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:61411 2024-11-23T03:22:45,514 DEBUG [Thread-1679 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:61411 2024-11-23T03:22:45,514 DEBUG [Thread-1675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,514 DEBUG [Thread-1679 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,517 DEBUG [Thread-1681 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:61411 2024-11-23T03:22:45,518 DEBUG [Thread-1681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,518 DEBUG [Thread-1677 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:61411 2024-11-23T03:22:45,518 DEBUG [Thread-1677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,672 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/62e078a56cd248b4afb131ffdeaf040b 2024-11-23T03:22:45,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/883cc19b7c854628a1f445db92c0c0d6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6 2024-11-23T03:22:45,679 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6, entries=150, sequenceid=331, filesize=30.5 K 2024-11-23T03:22:45,680 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/e139fb99c7434a70ada3fbd78004f93e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e 2024-11-23T03:22:45,683 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e, entries=150, sequenceid=331, filesize=12.0 K 2024-11-23T03:22:45,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/62e078a56cd248b4afb131ffdeaf040b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b 2024-11-23T03:22:45,687 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b, entries=150, sequenceid=331, filesize=12.0 K 2024-11-23T03:22:45,688 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 73db48321d40b906ec9d02fc18076043 in 1301ms, sequenceid=331, compaction requested=false 2024-11-23T03:22:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:45,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-23T03:22:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-23T03:22:45,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-23T03:22:45,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.5360 sec 2024-11-23T03:22:45,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 4.5390 sec 2024-11-23T03:22:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:45,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:22:45,714 DEBUG [Thread-1666 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:61411 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:45,714 DEBUG [Thread-1666 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:45,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:45,715 DEBUG [Thread-1670 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:61411 2024-11-23T03:22:45,715 DEBUG [Thread-1670 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,718 DEBUG [Thread-1668 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:61411 2024-11-23T03:22:45,718 DEBUG [Thread-1668 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,718 DEBUG [Thread-1662 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bf5e2f0 to 127.0.0.1:61411 2024-11-23T03:22:45,718 DEBUG [Thread-1662 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:45,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237bdfb461f6034cd89e774ee8ef6de5a5_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:45,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38243 is added to blk_1073742287_1463 (size=12454) 2024-11-23T03:22:46,124 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:46,127 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237bdfb461f6034cd89e774ee8ef6de5a5_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237bdfb461f6034cd89e774ee8ef6de5a5_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:46,128 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/1dbe827017ed48719963fb5df53bace2, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:46,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/1dbe827017ed48719963fb5df53bace2 is 175, key is test_row_0/A:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:46,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742288_1464 (size=31255) 2024-11-23T03:22:46,532 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/1dbe827017ed48719963fb5df53bace2 2024-11-23T03:22:46,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ca850262b2fd41b2929bb2c8ca896c22 is 50, key is test_row_0/B:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:46,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742289_1465 (size=12301) 2024-11-23T03:22:46,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ca850262b2fd41b2929bb2c8ca896c22 2024-11-23T03:22:46,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d2d344b000ae4afc9f5b5910322576cb is 50, key is test_row_0/C:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:46,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742290_1466 
(size=12301) 2024-11-23T03:22:47,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d2d344b000ae4afc9f5b5910322576cb 2024-11-23T03:22:47,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/1dbe827017ed48719963fb5df53bace2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2 2024-11-23T03:22:47,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2, entries=150, sequenceid=356, filesize=30.5 K 2024-11-23T03:22:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/ca850262b2fd41b2929bb2c8ca896c22 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22 2024-11-23T03:22:47,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:22:47,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/d2d344b000ae4afc9f5b5910322576cb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb 2024-11-23T03:22:47,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:22:47,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=20.13 KB/20610 for 73db48321d40b906ec9d02fc18076043 in 1650ms, sequenceid=356, compaction requested=true 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73db48321d40b906ec9d02fc18076043:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:47,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:47,364 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:47,364 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94005 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37143 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/A is initiating minor compaction (all files) 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/B is initiating minor compaction (all files) 2024-11-23T03:22:47,365 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/A in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:47,365 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/B in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:47,365 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=91.8 K 2024-11-23T03:22:47,365 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:47,365 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/517087c285bf4f0f92646d64ee894e08, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.3 K 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2] 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82a6105483ad4fa987ad529eab78a635, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:47,365 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 517087c285bf4f0f92646d64ee894e08, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:47,366 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 883cc19b7c854628a1f445db92c0c0d6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332162926 2024-11-23T03:22:47,366 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e139fb99c7434a70ada3fbd78004f93e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332162926 2024-11-23T03:22:47,366 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dbe827017ed48719963fb5df53bace2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332165088 2024-11-23T03:22:47,366 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ca850262b2fd41b2929bb2c8ca896c22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332165088 2024-11-23T03:22:47,371 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:47,372 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#B#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:47,372 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/353952c91cb04d2c8422f2ee59379698 is 50, key is test_row_0/B:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:47,372 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123b5a3fb3f72ab46d79c6f998f474d240c_73db48321d40b906ec9d02fc18076043 store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:47,375 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123b5a3fb3f72ab46d79c6f998f474d240c_73db48321d40b906ec9d02fc18076043, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:47,376 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b5a3fb3f72ab46d79c6f998f474d240c_73db48321d40b906ec9d02fc18076043 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:47,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742291_1467 (size=12643) 2024-11-23T03:22:47,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742292_1468 (size=4469) 2024-11-23T03:22:47,781 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/353952c91cb04d2c8422f2ee59379698 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/353952c91cb04d2c8422f2ee59379698 2024-11-23T03:22:47,782 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#A#compaction#393 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:47,783 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/af72cab44bed4c56b52446666757ffdd is 175, key is test_row_0/A:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:47,785 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/B of 73db48321d40b906ec9d02fc18076043 into 353952c91cb04d2c8422f2ee59379698(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:47,785 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:47,785 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/B, priority=13, startTime=1732332167364; duration=0sec 2024-11-23T03:22:47,785 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:47,785 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:B 2024-11-23T03:22:47,785 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:47,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742293_1469 (size=31597) 2024-11-23T03:22:47,786 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37143 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:47,786 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 73db48321d40b906ec9d02fc18076043/C is initiating minor compaction (all files) 2024-11-23T03:22:47,786 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73db48321d40b906ec9d02fc18076043/C in TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:47,786 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e2ae704667cd4ba798617a96309219d4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp, totalSize=36.3 K 2024-11-23T03:22:47,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e2ae704667cd4ba798617a96309219d4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332162282 2024-11-23T03:22:47,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 62e078a56cd248b4afb131ffdeaf040b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732332162926 2024-11-23T03:22:47,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d2d344b000ae4afc9f5b5910322576cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332165088 2024-11-23T03:22:47,793 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73db48321d40b906ec9d02fc18076043#C#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:47,793 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/2bb4071e20ee479794b6e01565a4be4b is 50, key is test_row_0/C:col10/1732332165088/Put/seqid=0 2024-11-23T03:22:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742294_1470 (size=12643) 2024-11-23T03:22:48,190 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/af72cab44bed4c56b52446666757ffdd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/af72cab44bed4c56b52446666757ffdd 2024-11-23T03:22:48,194 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/A of 73db48321d40b906ec9d02fc18076043 into af72cab44bed4c56b52446666757ffdd(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:22:48,194 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:48,194 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/A, priority=13, startTime=1732332167364; duration=0sec 2024-11-23T03:22:48,194 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:48,194 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:A 2024-11-23T03:22:48,199 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/2bb4071e20ee479794b6e01565a4be4b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/2bb4071e20ee479794b6e01565a4be4b 2024-11-23T03:22:48,202 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73db48321d40b906ec9d02fc18076043/C of 73db48321d40b906ec9d02fc18076043 into 2bb4071e20ee479794b6e01565a4be4b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:48,202 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:48,202 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043., storeName=73db48321d40b906ec9d02fc18076043/C, priority=13, startTime=1732332167364; duration=0sec 2024-11-23T03:22:48,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:48,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73db48321d40b906ec9d02fc18076043:C 2024-11-23T03:22:49,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T03:22:49,258 INFO [Thread-1672 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-23T03:22:51,700 DEBUG [Thread-1664 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:61411 2024-11-23T03:22:51,700 DEBUG [Thread-1664 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1688 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5063 rows 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1701 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5103 rows 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1682 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5046 rows 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1702 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5106 rows 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1698 2024-11-23T03:22:51,701 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5094 rows 2024-11-23T03:22:51,701 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:22:51,701 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f7f772a to 127.0.0.1:61411 2024-11-23T03:22:51,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:22:51,703 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T03:22:51,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T03:22:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:51,707 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332171707"}]},"ts":"1732332171707"} 2024-11-23T03:22:51,709 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T03:22:51,710 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T03:22:51,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:22:51,712 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, UNASSIGN}] 2024-11-23T03:22:51,712 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, UNASSIGN 2024-11-23T03:22:51,713 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:51,713 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:22:51,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:51,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:51,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:51,865 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 73db48321d40b906ec9d02fc18076043, disabling compactions & flushes 2024-11-23T03:22:51,865 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. after waiting 0 ms 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 
2024-11-23T03:22:51,865 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 73db48321d40b906ec9d02fc18076043 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=A 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=B 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 73db48321d40b906ec9d02fc18076043, store=C 2024-11-23T03:22:51,865 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:51,870 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123df9c2e7497c242cb917747bc767971d2_73db48321d40b906ec9d02fc18076043 is 50, key is test_row_0/A:col10/1732332165717/Put/seqid=0 2024-11-23T03:22:51,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742295_1471 (size=12454) 2024-11-23T03:22:52,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:52,274 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:52,277 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123df9c2e7497c242cb917747bc767971d2_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123df9c2e7497c242cb917747bc767971d2_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:52,278 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/5db56cd8b4d940388e4e0e4b72446135, store: [table=TestAcidGuarantees family=A region=73db48321d40b906ec9d02fc18076043] 2024-11-23T03:22:52,278 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/5db56cd8b4d940388e4e0e4b72446135 is 175, key is test_row_0/A:col10/1732332165717/Put/seqid=0 2024-11-23T03:22:52,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742296_1472 (size=31255) 2024-11-23T03:22:52,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:52,682 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/5db56cd8b4d940388e4e0e4b72446135 2024-11-23T03:22:52,688 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/6bfe92228cdc41cc9f7bf860da040f15 is 50, key is test_row_0/B:col10/1732332165717/Put/seqid=0 2024-11-23T03:22:52,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742297_1473 (size=12301) 2024-11-23T03:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:53,092 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/6bfe92228cdc41cc9f7bf860da040f15 2024-11-23T03:22:53,098 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a58d86cc9e5645d4a1ba831cee48396d is 50, key is test_row_0/C:col10/1732332165717/Put/seqid=0 2024-11-23T03:22:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742298_1474 (size=12301) 2024-11-23T03:22:53,501 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=366 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a58d86cc9e5645d4a1ba831cee48396d 2024-11-23T03:22:53,505 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/A/5db56cd8b4d940388e4e0e4b72446135 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/5db56cd8b4d940388e4e0e4b72446135 2024-11-23T03:22:53,507 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/5db56cd8b4d940388e4e0e4b72446135, entries=150, sequenceid=366, filesize=30.5 K 2024-11-23T03:22:53,508 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/B/6bfe92228cdc41cc9f7bf860da040f15 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/6bfe92228cdc41cc9f7bf860da040f15 2024-11-23T03:22:53,511 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/6bfe92228cdc41cc9f7bf860da040f15, entries=150, sequenceid=366, filesize=12.0 K 2024-11-23T03:22:53,511 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/.tmp/C/a58d86cc9e5645d4a1ba831cee48396d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a58d86cc9e5645d4a1ba831cee48396d 2024-11-23T03:22:53,514 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a58d86cc9e5645d4a1ba831cee48396d, entries=150, sequenceid=366, filesize=12.0 K 2024-11-23T03:22:53,514 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 73db48321d40b906ec9d02fc18076043 in 1649ms, sequenceid=366, compaction requested=false 2024-11-23T03:22:53,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2] to archive 2024-11-23T03:22:53,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T03:22:53,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/8c86c3f51241416695fe0468e8a0cfa1 2024-11-23T03:22:53,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/77b1b9f0357748859a6f11c43c48c3dc 2024-11-23T03:22:53,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/cb7742e0c2034a9c839b70ccf9421e2d 2024-11-23T03:22:53,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/6f8884fc3370452c9435959a02761dd4 2024-11-23T03:22:53,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/ce4a0bc4a0074de09d9a030bf288975b 2024-11-23T03:22:53,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/765c3821b66440de8a2dba27ab5ab3de 2024-11-23T03:22:53,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/82a6105483ad4fa987ad529eab78a635 2024-11-23T03:22:53,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/739633286c8d432daf49f1dad0930eef 2024-11-23T03:22:53,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/883cc19b7c854628a1f445db92c0c0d6 2024-11-23T03:22:53,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/1dbe827017ed48719963fb5df53bace2 2024-11-23T03:22:53,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/1bad0c3d830249b38b0435ea011731ba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/a054489c694242b7a7ef8210225896c9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/517087c285bf4f0f92646d64ee894e08, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22] to archive 2024-11-23T03:22:53,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:53,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/1bad0c3d830249b38b0435ea011731ba to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/1bad0c3d830249b38b0435ea011731ba 2024-11-23T03:22:53,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/04c7fe7ec07c4826a7edd72a6a86c1a1 2024-11-23T03:22:53,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/b779aa83901b4bb7b4e36627fead91a8 2024-11-23T03:22:53,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/a054489c694242b7a7ef8210225896c9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/a054489c694242b7a7ef8210225896c9 2024-11-23T03:22:53,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/f0c4be9fddbd414fb4ed3397ef5a9ddb 2024-11-23T03:22:53,531 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/168e6f9afe3f4ab0a25258c5423b8978 2024-11-23T03:22:53,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/517087c285bf4f0f92646d64ee894e08 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/517087c285bf4f0f92646d64ee894e08 2024-11-23T03:22:53,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ef5eeb113d9b447191a90661501c049a 2024-11-23T03:22:53,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/e139fb99c7434a70ada3fbd78004f93e 2024-11-23T03:22:53,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/ca850262b2fd41b2929bb2c8ca896c22 2024-11-23T03:22:53,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/212697ff61784ffca85b1667f789426c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1364f6176ae349a2bcd57bbc481f2f4c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e2ae704667cd4ba798617a96309219d4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb] to archive 2024-11-23T03:22:53,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:22:53,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/212697ff61784ffca85b1667f789426c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/212697ff61784ffca85b1667f789426c 2024-11-23T03:22:53,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/c51a7932f2ed4f5bafb5a3401887e1cb 2024-11-23T03:22:53,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/4267f5c3b1f14312a28012954aa7c7dc 2024-11-23T03:22:53,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1364f6176ae349a2bcd57bbc481f2f4c to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/1364f6176ae349a2bcd57bbc481f2f4c 2024-11-23T03:22:53,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d57d936e0b154ebaa9babe06a0f3e5a7 2024-11-23T03:22:53,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/721ee8dc97c94a1a87cae0ed494d4089 2024-11-23T03:22:53,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e2ae704667cd4ba798617a96309219d4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/e2ae704667cd4ba798617a96309219d4 2024-11-23T03:22:53,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/ddab2bd322954ba591b09ed1dc1e20b3 2024-11-23T03:22:53,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/62e078a56cd248b4afb131ffdeaf040b 2024-11-23T03:22:53,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/d2d344b000ae4afc9f5b5910322576cb 2024-11-23T03:22:53,547 DEBUG 
[RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits/369.seqid, newMaxSeqId=369, maxSeqId=4 2024-11-23T03:22:53,547 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043. 2024-11-23T03:22:53,548 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 73db48321d40b906ec9d02fc18076043: 2024-11-23T03:22:53,549 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,549 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=73db48321d40b906ec9d02fc18076043, regionState=CLOSED 2024-11-23T03:22:53,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-23T03:22:53,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure 73db48321d40b906ec9d02fc18076043, server=0d51875c74df,34141,1732332039937 in 1.8370 sec 2024-11-23T03:22:53,551 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-11-23T03:22:53,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=73db48321d40b906ec9d02fc18076043, UNASSIGN in 1.8390 sec 2024-11-23T03:22:53,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-23T03:22:53,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8410 sec 2024-11-23T03:22:53,553 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332173553"}]},"ts":"1732332173553"} 2024-11-23T03:22:53,554 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:22:53,556 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:22:53,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8520 sec 2024-11-23T03:22:53,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T03:22:53,810 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-23T03:22:53,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:22:53,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,813 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,814 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-23T03:22:53,844 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,849 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits] 2024-11-23T03:22:53,854 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/5db56cd8b4d940388e4e0e4b72446135 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/5db56cd8b4d940388e4e0e4b72446135 2024-11-23T03:22:53,857 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/af72cab44bed4c56b52446666757ffdd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/A/af72cab44bed4c56b52446666757ffdd 2024-11-23T03:22:53,861 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/353952c91cb04d2c8422f2ee59379698 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/353952c91cb04d2c8422f2ee59379698 2024-11-23T03:22:53,862 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/6bfe92228cdc41cc9f7bf860da040f15 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/B/6bfe92228cdc41cc9f7bf860da040f15 2024-11-23T03:22:53,869 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/2bb4071e20ee479794b6e01565a4be4b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/2bb4071e20ee479794b6e01565a4be4b 2024-11-23T03:22:53,871 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a58d86cc9e5645d4a1ba831cee48396d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/C/a58d86cc9e5645d4a1ba831cee48396d 2024-11-23T03:22:53,875 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits/369.seqid to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043/recovered.edits/369.seqid 2024-11-23T03:22:53,876 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,877 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:22:53,878 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:22:53,879 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T03:22:53,889 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123009f1ee65e90402d89bfbd6ac2884b2a_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123009f1ee65e90402d89bfbd6ac2884b2a_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,891 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123031f454e18f04e599e82adb9e26df623_73db48321d40b906ec9d02fc18076043 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123031f454e18f04e599e82adb9e26df623_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,892 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309bea810781449348de16aa473dc1eed_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309bea810781449348de16aa473dc1eed_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,894 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123259e3135a70f40dcb3211a7c74285d24_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123259e3135a70f40dcb3211a7c74285d24_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,901 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233621ce3ca8c74a8f89c033cbc5cabec7_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233621ce3ca8c74a8f89c033cbc5cabec7_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,903 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234177fc29ac2a458996ea98dbd056ec35_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234177fc29ac2a458996ea98dbd056ec35_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,904 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235a2ddca3791f443aa9f2bd838d3045da_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235a2ddca3791f443aa9f2bd838d3045da_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,906 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236114a548caf64e11bfb53d3c8fbb9d2d_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236114a548caf64e11bfb53d3c8fbb9d2d_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,908 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112372ebce64d327453bb368176c8aa98ca7_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112372ebce64d327453bb368176c8aa98ca7_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,909 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237bdfb461f6034cd89e774ee8ef6de5a5_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237bdfb461f6034cd89e774ee8ef6de5a5_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,911 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d96e9e68b85499f9a7df306610a2ab4_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d96e9e68b85499f9a7df306610a2ab4_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,913 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238796499234d743148dcbadc8e483b7ae_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238796499234d743148dcbadc8e483b7ae_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,915 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238cc0977aad7844749630c1da303f514b_73db48321d40b906ec9d02fc18076043 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238cc0977aad7844749630c1da303f514b_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,916 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239174221734604586a970243aab3e0509_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239174221734604586a970243aab3e0509_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-23T03:22:53,918 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b73916ebee504f0d894bf16ee7d40d55_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b73916ebee504f0d894bf16ee7d40d55_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,919 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123be97a97e7bb04249be1f257641d7ebba_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123be97a97e7bb04249be1f257641d7ebba_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,921 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb96368d1b8441f9be4fdb9254b00482_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb96368d1b8441f9be4fdb9254b00482_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,923 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d9da8c67f7944cc6a16ab06b52c0ffa2_73db48321d40b906ec9d02fc18076043 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d9da8c67f7944cc6a16ab06b52c0ffa2_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,924 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123df9c2e7497c242cb917747bc767971d2_73db48321d40b906ec9d02fc18076043 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123df9c2e7497c242cb917747bc767971d2_73db48321d40b906ec9d02fc18076043 2024-11-23T03:22:53,925 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:22:53,928 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,937 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:22:53,947 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T03:22:53,956 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,956 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:22:53,958 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332173957"}]},"ts":"9223372036854775807"} 2024-11-23T03:22:53,961 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:22:53,961 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 73db48321d40b906ec9d02fc18076043, NAME => 'TestAcidGuarantees,,1732332144248.73db48321d40b906ec9d02fc18076043.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:22:53,962 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
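The HFileArchiver entries above show that store files are moved rather than deleted: a file under <root>/data/<namespace>/<table>/<region>/<family>/ is re-rooted under <root>/archive/data/ with the same relative path. Below is a minimal Java sketch of that path translation as it appears in the log; it only mirrors the mapping visible above and is not HBase's actual archiver code.

    import org.apache.hadoop.fs.Path;

    final class ArchivePathSketch {
      // rootDir is the cluster root directory seen in the log, e.g.
      // hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417
      static Path archiveLocation(Path rootDir, Path storeFile) {
        String root = rootDir.toString();
        String file = storeFile.toString();
        String dataPrefix = root + "/data/";
        if (!file.startsWith(dataPrefix)) {
          throw new IllegalArgumentException("not under the data dir: " + file);
        }
        // keep namespace/table/region/family/file, re-root it under archive/data
        return new Path(root + "/archive/data/" + file.substring(dataPrefix.length()));
      }
    }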
2024-11-23T03:22:53,962 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332173962"}]},"ts":"9223372036854775807"} 2024-11-23T03:22:53,969 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:22:53,973 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:53,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 162 msec 2024-11-23T03:22:54,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-23T03:22:54,118 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-23T03:22:54,128 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=449 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=550 (was 552), ProcessCount=11 (was 11), AvailableMemoryMB=3877 (was 3983) 2024-11-23T03:22:54,139 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=550, ProcessCount=11, AvailableMemoryMB=3877 2024-11-23T03:22:54,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
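The DISABLE (procId 116) and DELETE (procId 120) operations reported as completed above are driven from the client through the Admin API. A hedged sketch of that client-side sequence, assuming an existing Connection (this is not the test's literal code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class DropTableSketch {
      // Both calls block until the corresponding master procedure finishes, which is
      // what the repeated "Checking to see if procedure is done" lines above reflect.
      static void dropTable(Connection connection) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = connection.getAdmin()) {
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn); // DisableTableProcedure
          }
          admin.deleteTable(tn);    // DeleteTableProcedure: archives region dirs, cleans hbase:meta
        }
      }
    }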
2024-11-23T03:22:54,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:22:54,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:22:54,143 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:22:54,143 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:54,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-11-23T03:22:54,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:22:54,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:54,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742299_1475 (size=960) 2024-11-23T03:22:54,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:54,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:54,551 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:22:54,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742300_1476 (size=53) 2024-11-23T03:22:54,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:54,957 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:22:54,957 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6a5f5cf259ce583546a3c6bfa36ac47c, disabling compactions & flushes 2024-11-23T03:22:54,957 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:54,957 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:54,957 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. after waiting 0 ms 2024-11-23T03:22:54,957 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:54,958 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
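The create request logged above spells out the table descriptor: three column families A, B and C with one version each, 64 KB blocks, ROW bloom filters, and the table attribute hbase.hregion.compacting.memstore.type=BASIC that selects the CompactingMemStore seen later when the region opens. A sketch of building an equivalent descriptor with the public client API (values copied from the log, everything else illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBlocksize(64 * 1024)
              .setBloomFilterType(BloomType.ROW)
              .build());
        }
        admin.createTable(table.build()); // drives the CreateTableProcedure (pid=121 here)
      }
    }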
2024-11-23T03:22:54,958 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:54,959 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:22:54,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332174959"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332174959"}]},"ts":"1732332174959"} 2024-11-23T03:22:54,960 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T03:22:54,961 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:22:54,961 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332174961"}]},"ts":"1732332174961"} 2024-11-23T03:22:54,962 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:22:54,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, ASSIGN}] 2024-11-23T03:22:54,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, ASSIGN 2024-11-23T03:22:54,968 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:22:55,118 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=6a5f5cf259ce583546a3c6bfa36ac47c, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:55,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure 6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:22:55,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:55,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:55,275 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
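While the master runs the CreateTableProcedure and its ASSIGN subprocedures above, the client keeps polling with "Checking to see if procedure is done pid=121". Roughly the same wait expressed with the public Admin API (a sketch, not the test's actual code; the single-argument async variant is assumed to be available in this 2.x client):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class CreateAndWaitSketch {
      static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
        // Submit the procedure, then block until the master reports it finished.
        admin.createTableAsync(desc).get(60, TimeUnit.SECONDS);
        // By this point the region has been opened and the table marked ENABLED in hbase:meta.
        if (!admin.isTableAvailable(desc.getTableName())) {
          throw new IllegalStateException("table did not come online");
        }
      }
    }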
2024-11-23T03:22:55,275 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:22:55,276 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,276 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:22:55,276 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,276 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,277 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,278 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:55,278 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a5f5cf259ce583546a3c6bfa36ac47c columnFamilyName A 2024-11-23T03:22:55,278 DEBUG [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:55,279 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(327): Store=6a5f5cf259ce583546a3c6bfa36ac47c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:55,279 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,280 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:55,280 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a5f5cf259ce583546a3c6bfa36ac47c columnFamilyName B 2024-11-23T03:22:55,280 DEBUG [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:55,281 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(327): Store=6a5f5cf259ce583546a3c6bfa36ac47c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:55,281 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,282 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:22:55,282 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a5f5cf259ce583546a3c6bfa36ac47c columnFamilyName C 2024-11-23T03:22:55,282 DEBUG [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:22:55,283 INFO [StoreOpener-6a5f5cf259ce583546a3c6bfa36ac47c-1 {}] regionserver.HStore(327): Store=6a5f5cf259ce583546a3c6bfa36ac47c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:22:55,283 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:55,284 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,284 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,285 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:22:55,286 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:55,288 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:22:55,289 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened 6a5f5cf259ce583546a3c6bfa36ac47c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69484653, jitterRate=0.03540201485157013}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:22:55,289 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:55,290 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., pid=123, masterSystemTime=1732332175272 2024-11-23T03:22:55,291 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:55,291 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
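With the region for 6a5f5cf259ce583546a3c6bfa36ac47c now open, the lines that follow show the test workers opening their own connections through ZooKeeper at 127.0.0.1:61411 and then issuing writes that drive the flush and the RegionTooBusyException seen at the end of this excerpt. A minimal sketch of such a worker connection (quorum and port taken from the log; the row key and values are purely illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class WorkerConnectionSketch {
      static void writeOneRow() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61411");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("hypothetical_row"));       // illustrative row key
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put); // may be rejected with RegionTooBusyException while the memstore is over its limit
        }
      }
    }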
2024-11-23T03:22:55,292 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=6a5f5cf259ce583546a3c6bfa36ac47c, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:22:55,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-23T03:22:55,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure 6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 in 172 msec 2024-11-23T03:22:55,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-23T03:22:55,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, ASSIGN in 328 msec 2024-11-23T03:22:55,296 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:22:55,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332175296"}]},"ts":"1732332175296"} 2024-11-23T03:22:55,297 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:22:55,299 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:22:55,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1580 sec 2024-11-23T03:22:56,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-23T03:22:56,248 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-11-23T03:22:56,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x238db126 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3512017b 2024-11-23T03:22:56,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@301741f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,268 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,269 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,270 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:22:56,271 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56030, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:22:56,273 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-23T03:22:56,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-23T03:22:56,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-23T03:22:56,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,290 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-23T03:22:56,300 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,301 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-23T03:22:56,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34b30c39 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b7f20c4 2024-11-23T03:22:56,320 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc486e1, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,321 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-23T03:22:56,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-23T03:22:56,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,333 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-23T03:22:56,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,345 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-23T03:22:56,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:22:56,364 DEBUG [hconnection-0x138d91de-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,365 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-23T03:22:56,372 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:56,373 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:56,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:56,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:56,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:22:56,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:22:56,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:56,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:22:56,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:56,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:22:56,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:56,384 DEBUG [hconnection-0x6d83373d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,384 DEBUG [hconnection-0x616ad240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,385 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45728, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,386 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,400 DEBUG [hconnection-0x30e66db6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,401 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332236405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332236405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332236405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,413 DEBUG [hconnection-0x6257f456-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,414 DEBUG [hconnection-0x2b775ec8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,414 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,415 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,417 DEBUG [hconnection-0x8df9e18-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,418 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,420 DEBUG [hconnection-0x242b7ecd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,421 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e91e1dbd756745249c6f19108caa72d7 is 50, key is test_row_0/A:col10/1732332176377/Put/seqid=0 2024-11-23T03:22:56,430 DEBUG [hconnection-0x2a790797-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,431 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,432 DEBUG [hconnection-0x4e2ccb3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:22:56,433 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-23T03:22:56,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742301_1477 (size=12001) 2024-11-23T03:22:56,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332236439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332236440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332236507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332236507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332236507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:56,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:56,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:56,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332236540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332236542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:56,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:56,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:56,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:56,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332236710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332236710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332236710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332236746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332236746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:56,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:56,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:56,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e91e1dbd756745249c6f19108caa72d7 2024-11-23T03:22:56,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b92d84138f974d66aa3b7732660798f5 is 50, key is test_row_0/B:col10/1732332176377/Put/seqid=0 2024-11-23T03:22:56,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742302_1478 (size=12001) 2024-11-23T03:22:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:56,985 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:56,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:56,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:56,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:57,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332237012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332237012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332237013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332237051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332237052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,138 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:57,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:57,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:57,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:57,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:57,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:57,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:57,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:57,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:57,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:57,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:57,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:57,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:57,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b92d84138f974d66aa3b7732660798f5 2024-11-23T03:22:57,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/3a17a79cef044612ad15ba9e1031c2d3 is 50, key is test_row_0/C:col10/1732332176377/Put/seqid=0 2024-11-23T03:22:57,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742303_1479 (size=12001) 2024-11-23T03:22:57,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/3a17a79cef044612ad15ba9e1031c2d3 2024-11-23T03:22:57,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e91e1dbd756745249c6f19108caa72d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7 2024-11-23T03:22:57,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T03:22:57,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b92d84138f974d66aa3b7732660798f5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5 2024-11-23T03:22:57,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T03:22:57,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/3a17a79cef044612ad15ba9e1031c2d3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3 2024-11-23T03:22:57,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T03:22:57,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1052ms, sequenceid=12, compaction requested=false 2024-11-23T03:22:57,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:57,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-23T03:22:57,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:57,448 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T03:22:57,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:22:57,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:57,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:22:57,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:57,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:22:57,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:57,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/97f2a4b3647a4ec893d15683b544fdcb is 50, key is test_row_0/A:col10/1732332176402/Put/seqid=0 2024-11-23T03:22:57,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:57,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to 
blk_1073742304_1480 (size=12001) 2024-11-23T03:22:57,480 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/97f2a4b3647a4ec893d15683b544fdcb 2024-11-23T03:22:57,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/72c5bf153ad643ea8bc721ac98a08f37 is 50, key is test_row_0/B:col10/1732332176402/Put/seqid=0 2024-11-23T03:22:57,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:57,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:57,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332237527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332237528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332237531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742305_1481 (size=12001) 2024-11-23T03:22:57,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332237557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332237558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332237635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332237635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332237635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332237842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332237843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332237844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:57,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/72c5bf153ad643ea8bc721ac98a08f37 2024-11-23T03:22:57,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bc14e4d43ce044038cde82928ff3c6d3 is 50, key is test_row_0/C:col10/1732332176402/Put/seqid=0 2024-11-23T03:22:57,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742306_1482 (size=12001) 2024-11-23T03:22:58,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332238146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332238152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332238152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,364 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bc14e4d43ce044038cde82928ff3c6d3 2024-11-23T03:22:58,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/97f2a4b3647a4ec893d15683b544fdcb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb 2024-11-23T03:22:58,377 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:22:58,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/72c5bf153ad643ea8bc721ac98a08f37 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37 2024-11-23T03:22:58,382 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:22:58,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bc14e4d43ce044038cde82928ff3c6d3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3 2024-11-23T03:22:58,394 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T03:22:58,395 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6a5f5cf259ce583546a3c6bfa36ac47c in 947ms, sequenceid=39, compaction requested=false 2024-11-23T03:22:58,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:58,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:22:58,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-23T03:22:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-23T03:22:58,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-23T03:22:58,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0240 sec 2024-11-23T03:22:58,400 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.0280 sec 2024-11-23T03:22:58,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-23T03:22:58,477 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-23T03:22:58,479 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:22:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-23T03:22:58,481 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:22:58,482 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:22:58,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:22:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:22:58,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:58,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:58,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:22:58,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2bfe0347aa1343618cb8dd06cbf96978 is 50, key is test_row_0/A:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:58,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742307_1483 (size=14341) 2024-11-23T03:22:58,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T03:22:58,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:58,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:58,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:58,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:58,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332238660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332238661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332238662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332238669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332238669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332238776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:22:58,787 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T03:22:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:58,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332238781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332238781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,940 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T03:22:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
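The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit; the same client connections keep resubmitting (note the rising callId values for 172.17.0.2:45734 and :45772), and the pid=127 flush procedure is meanwhile re-dispatched because the region reports it is already flushing. As a minimal illustrative sketch only, not part of this test or its log, a writer using the standard HBase Java client could bound its own retries around this condition; the table, row, column family, and qualifier below are taken from the log, while the value, retry cap, and backoff are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BoundedRetryWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row/family/qualifier as seen in the flush key "test_row_0/A:col10/..."; the value is made up.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempt = 0;
                while (true) {
                    try {
                        table.put(put);   // rejected server-side while the memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        // Note: the HBase client also retries internally; with default settings the busy
                        // condition may instead surface as a retries-exhausted IOException once those run out.
                        if (++attempt >= 5) {
                            throw e;                     // assumed cap of 5 attempts
                        }
                        Thread.sleep(200L * attempt);    // assumed linear backoff
                    }
                }
            }
        }
    }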
2024-11-23T03:22:58,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:58,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332238983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332238990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:58,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332238991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2bfe0347aa1343618cb8dd06cbf96978 2024-11-23T03:22:59,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/78ae098ed51b4b319160bf253f13a1a8 is 50, key is test_row_0/B:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:59,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742308_1484 (size=12001) 2024-11-23T03:22:59,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/78ae098ed51b4b319160bf253f13a1a8 2024-11-23T03:22:59,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f2e5830695b24c88bb50cf8825d3fab2 is 50, key is test_row_0/C:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:59,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742309_1485 (size=12001) 2024-11-23T03:22:59,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f2e5830695b24c88bb50cf8825d3fab2 2024-11-23T03:22:59,086 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:22:59,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2bfe0347aa1343618cb8dd06cbf96978 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978 2024-11-23T03:22:59,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T03:22:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:59,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:59,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:22:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:22:59,104 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:22:59,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978, entries=200, sequenceid=50, filesize=14.0 K 2024-11-23T03:22:59,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/78ae098ed51b4b319160bf253f13a1a8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8 2024-11-23T03:22:59,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8, entries=150, sequenceid=50, filesize=11.7 K 2024-11-23T03:22:59,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f2e5830695b24c88bb50cf8825d3fab2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2 2024-11-23T03:22:59,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2, entries=150, sequenceid=50, filesize=11.7 K 2024-11-23T03:22:59,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6a5f5cf259ce583546a3c6bfa36ac47c in 552ms, sequenceid=50, compaction requested=true 2024-11-23T03:22:59,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:59,126 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:59,127 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:59,127 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:22:59,127 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:22:59,127 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.4 K 2024-11-23T03:22:59,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:22:59,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:59,128 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:59,128 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e91e1dbd756745249c6f19108caa72d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732332176369 2024-11-23T03:22:59,129 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97f2a4b3647a4ec893d15683b544fdcb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332176400 2024-11-23T03:22:59,129 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:59,129 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:22:59,129 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:22:59,129 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.2 K 2024-11-23T03:22:59,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:22:59,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:59,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:22:59,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:59,130 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b92d84138f974d66aa3b7732660798f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732332176369 2024-11-23T03:22:59,130 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bfe0347aa1343618cb8dd06cbf96978, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:22:59,131 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 72c5bf153ad643ea8bc721ac98a08f37, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332176400 2024-11-23T03:22:59,132 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 78ae098ed51b4b319160bf253f13a1a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:22:59,148 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#407 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:59,148 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/32176c09b4224e908e64458f9e81780a is 50, key is test_row_0/B:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:59,161 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:59,161 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/cde7911cb60b4f8491f59809e71b7285 is 50, key is test_row_0/A:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:59,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742310_1486 (size=12104) 2024-11-23T03:22:59,197 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/32176c09b4224e908e64458f9e81780a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/32176c09b4224e908e64458f9e81780a 2024-11-23T03:22:59,203 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 32176c09b4224e908e64458f9e81780a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
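The compaction entries above expose the knobs the ExploringCompactionPolicy worked with: three store files were eligible (the minimum for a minor compaction), sixteen files is the blocking threshold, and the PressureAwareThroughputController capped the compaction at 50.00 MB/second. Below is a minimal configuration sketch of the standard settings behind those numbers; the values are illustrative defaults, not necessarily the ones this test run used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction;
    // "Selecting compaction from 3 store files ... 3 eligible" reflects the minimum of 3.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Updates to a store are blocked once it accumulates this many files;
    // the log reports it as "16 blocking".
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Size ratio used by the exploring policy when scoring candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}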
2024-11-23T03:22:59,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:59,203 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332179128; duration=0sec 2024-11-23T03:22:59,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:22:59,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:22:59,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:22:59,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742311_1487 (size=12104) 2024-11-23T03:22:59,205 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:22:59,205 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:22:59,206 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:22:59,206 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.2 K 2024-11-23T03:22:59,206 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a17a79cef044612ad15ba9e1031c2d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732332176369 2024-11-23T03:22:59,207 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting bc14e4d43ce044038cde82928ff3c6d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732332176400 2024-11-23T03:22:59,207 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f2e5830695b24c88bb50cf8825d3fab2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:22:59,234 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#409 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:22:59,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/e7337271593d4df2a5ce1449a46db554 is 50, key is test_row_0/C:col10/1732332178570/Put/seqid=0 2024-11-23T03:22:59,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T03:22:59,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
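From this point the region runs a ~154 KB flush while writers keep arriving, and the entries that follow show those writes being rejected with RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K under this test configuration). The sketch below shows, under stated assumptions, how a writer might back off on such rejections; the row, family, qualifier and value are placeholders, and because the HBase client can wrap the exception in its own retry machinery, the catch is on IOException.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // A RegionTooBusyException (possibly wrapped by client retries) means the
          // memstore is over its blocking limit and a flush is in progress;
          // sleep with growing delay instead of hammering the region server.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}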
2024-11-23T03:22:59,248 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:22:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1eee56b2fa144184b04b1586410b5242 is 50, key is test_row_0/A:col10/1732332178654/Put/seqid=0 2024-11-23T03:22:59,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742312_1488 (size=12104) 2024-11-23T03:22:59,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:59,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:22:59,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742313_1489 (size=12001) 2024-11-23T03:22:59,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332239302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332239307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332239311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332239412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332239415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332239420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:22:59,611 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/cde7911cb60b4f8491f59809e71b7285 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/cde7911cb60b4f8491f59809e71b7285 2024-11-23T03:22:59,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332239619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,622 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into cde7911cb60b4f8491f59809e71b7285(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:59,622 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:59,622 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332179126; duration=0sec 2024-11-23T03:22:59,622 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:59,622 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:22:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332239623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332239627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,674 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/e7337271593d4df2a5ce1449a46db554 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e7337271593d4df2a5ce1449a46db554 2024-11-23T03:22:59,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332239673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,679 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into e7337271593d4df2a5ce1449a46db554(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:22:59,679 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:59,679 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332179130; duration=0sec 2024-11-23T03:22:59,679 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:22:59,679 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:22:59,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:22:59,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332239678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:22:59,696 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1eee56b2fa144184b04b1586410b5242 2024-11-23T03:22:59,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/818f055a9051476a8461d0805b755472 is 50, key is test_row_0/B:col10/1732332178654/Put/seqid=0 2024-11-23T03:22:59,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742314_1490 (size=12001) 2024-11-23T03:22:59,727 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/818f055a9051476a8461d0805b755472 2024-11-23T03:22:59,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/46890ed0d05c421a9e00397a3a92ddf8 is 50, key is test_row_0/C:col10/1732332178654/Put/seqid=0 2024-11-23T03:22:59,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742315_1491 (size=12001) 2024-11-23T03:22:59,772 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/46890ed0d05c421a9e00397a3a92ddf8 2024-11-23T03:22:59,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1eee56b2fa144184b04b1586410b5242 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242 2024-11-23T03:22:59,784 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:22:59,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/818f055a9051476a8461d0805b755472 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472 2024-11-23T03:22:59,794 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:22:59,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/46890ed0d05c421a9e00397a3a92ddf8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8 2024-11-23T03:22:59,809 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T03:22:59,813 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6a5f5cf259ce583546a3c6bfa36ac47c in 565ms, sequenceid=77, compaction requested=false 2024-11-23T03:22:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 
{event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:22:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:22:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-23T03:22:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-23T03:22:59,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-23T03:22:59,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3330 sec 2024-11-23T03:22:59,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.3380 sec 2024-11-23T03:22:59,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:22:59,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:22:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:22:59,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/3d1bcb44daa34d1294bd043a755fd217 is 50, key is test_row_0/A:col10/1732332179304/Put/seqid=0 2024-11-23T03:22:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742316_1492 (size=14341) 2024-11-23T03:22:59,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/3d1bcb44daa34d1294bd043a755fd217 2024-11-23T03:23:00,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65be7c0150514d378c8b66c3de5a30ba is 50, key is test_row_0/B:col10/1732332179304/Put/seqid=0 2024-11-23T03:23:00,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332240030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332240031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332240032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742317_1493 (size=12001) 2024-11-23T03:23:00,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332240145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332240146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332240147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332240352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332240353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332240353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65be7c0150514d378c8b66c3de5a30ba 2024-11-23T03:23:00,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f6e19c6790814b79b747e07d8652d1f6 is 50, key is test_row_0/C:col10/1732332179304/Put/seqid=0 2024-11-23T03:23:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742318_1494 (size=12001) 2024-11-23T03:23:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T03:23:00,588 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-23T03:23:00,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-23T03:23:00,592 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:00,593 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:00,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:00,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332240659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332240660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:00,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332240661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:00,748 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T03:23:00,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:00,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:00,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:00,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:00,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:00,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:00,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:00,901 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:00,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T03:23:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:00,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:00,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:00,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f6e19c6790814b79b747e07d8652d1f6 2024-11-23T03:23:00,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/3d1bcb44daa34d1294bd043a755fd217 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217 2024-11-23T03:23:00,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217, entries=200, sequenceid=90, filesize=14.0 K 2024-11-23T03:23:00,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65be7c0150514d378c8b66c3de5a30ba as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba 2024-11-23T03:23:00,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:23:00,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f6e19c6790814b79b747e07d8652d1f6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6 2024-11-23T03:23:00,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T03:23:00,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1021ms, sequenceid=90, compaction requested=true 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:00,950 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:00,950 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:00,951 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:00,951 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:00,951 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:00,951 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/32176c09b4224e908e64458f9e81780a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.3 K 2024-11-23T03:23:00,952 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:00,952 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:00,952 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:00,952 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/cde7911cb60b4f8491f59809e71b7285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.5 K 2024-11-23T03:23:00,952 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting cde7911cb60b4f8491f59809e71b7285, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:23:00,952 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 32176c09b4224e908e64458f9e81780a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:23:00,953 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1eee56b2fa144184b04b1586410b5242, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332178654 2024-11-23T03:23:00,953 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 818f055a9051476a8461d0805b755472, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332178654 2024-11-23T03:23:00,953 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 65be7c0150514d378c8b66c3de5a30ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179304 2024-11-23T03:23:00,953 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d1bcb44daa34d1294bd043a755fd217, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179300 2024-11-23T03:23:00,962 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#416 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:00,962 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/314c4034a4c14b0a97656a08bd4ca6f7 is 50, key is test_row_0/A:col10/1732332179304/Put/seqid=0 2024-11-23T03:23:00,977 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:00,977 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/cf60454ee79e4ba7a1693afa56ff8d8d is 50, key is test_row_0/B:col10/1732332179304/Put/seqid=0 2024-11-23T03:23:00,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742319_1495 (size=12207) 2024-11-23T03:23:00,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742320_1496 (size=12207) 2024-11-23T03:23:01,002 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/cf60454ee79e4ba7a1693afa56ff8d8d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cf60454ee79e4ba7a1693afa56ff8d8d 2024-11-23T03:23:01,008 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into cf60454ee79e4ba7a1693afa56ff8d8d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:01,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:01,009 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332180950; duration=0sec 2024-11-23T03:23:01,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:01,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:01,009 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:01,011 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:01,011 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:01,011 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:01,012 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e7337271593d4df2a5ce1449a46db554, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.3 K 2024-11-23T03:23:01,012 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e7337271593d4df2a5ce1449a46db554, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732332177525 2024-11-23T03:23:01,012 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 46890ed0d05c421a9e00397a3a92ddf8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732332178654 2024-11-23T03:23:01,013 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f6e19c6790814b79b747e07d8652d1f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179304 2024-11-23T03:23:01,022 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:01,022 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/7629c27b27484e7b8df440e08ea523ac is 50, key is test_row_0/C:col10/1732332179304/Put/seqid=0 2024-11-23T03:23:01,054 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T03:23:01,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:01,055 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:01,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:01,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742321_1497 (size=12207) 2024-11-23T03:23:01,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/5da52673950c4ddbb2ad59c226f1f7be is 50, key is test_row_0/A:col10/1732332180030/Put/seqid=0 2024-11-23T03:23:01,070 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/7629c27b27484e7b8df440e08ea523ac as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7629c27b27484e7b8df440e08ea523ac 2024-11-23T03:23:01,075 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 7629c27b27484e7b8df440e08ea523ac(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:01,075 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:01,075 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332180950; duration=0sec 2024-11-23T03:23:01,075 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:01,075 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:01,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742322_1498 (size=12001) 2024-11-23T03:23:01,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:01,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332241182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332241183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332241185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:01,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332241289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332241291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332241291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,386 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/314c4034a4c14b0a97656a08bd4ca6f7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/314c4034a4c14b0a97656a08bd4ca6f7 2024-11-23T03:23:01,392 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 314c4034a4c14b0a97656a08bd4ca6f7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:01,392 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:01,392 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332180950; duration=0sec 2024-11-23T03:23:01,392 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:01,392 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:01,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332241494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332241496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332241496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,514 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/5da52673950c4ddbb2ad59c226f1f7be 2024-11-23T03:23:01,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/49180ed55ac3471e8491adcf3097f38b is 50, key is test_row_0/B:col10/1732332180030/Put/seqid=0 2024-11-23T03:23:01,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742323_1499 (size=12001) 2024-11-23T03:23:01,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332241686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,692 DEBUG [Thread-2121 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:01,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:01,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332241705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,711 DEBUG [Thread-2123 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:01,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332241800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332241801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332241802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:01,980 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/49180ed55ac3471e8491adcf3097f38b 2024-11-23T03:23:01,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/4d106749dcbc4cf3a8aac99d14e90d14 is 50, key is test_row_0/C:col10/1732332180030/Put/seqid=0 2024-11-23T03:23:02,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742324_1500 (size=12001) 2024-11-23T03:23:02,049 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/4d106749dcbc4cf3a8aac99d14e90d14 2024-11-23T03:23:02,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/5da52673950c4ddbb2ad59c226f1f7be as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be 2024-11-23T03:23:02,064 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be, entries=150, sequenceid=116, filesize=11.7 K 2024-11-23T03:23:02,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/49180ed55ac3471e8491adcf3097f38b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b 2024-11-23T03:23:02,073 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b, entries=150, sequenceid=116, filesize=11.7 K 2024-11-23T03:23:02,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/4d106749dcbc4cf3a8aac99d14e90d14 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14 2024-11-23T03:23:02,082 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14, entries=150, sequenceid=116, filesize=11.7 K 2024-11-23T03:23:02,083 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1028ms, sequenceid=116, compaction requested=false 2024-11-23T03:23:02,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:02,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:02,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-23T03:23:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-23T03:23:02,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-23T03:23:02,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4910 sec 2024-11-23T03:23:02,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4960 sec 2024-11-23T03:23:02,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T03:23:02,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:02,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:02,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:02,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:02,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/528672d0365d4506891fd42afbb92942 is 50, key is test_row_0/A:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742325_1501 (size=14391) 2024-11-23T03:23:02,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/528672d0365d4506891fd42afbb92942 2024-11-23T03:23:02,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332242372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332242382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332242383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b22f4befde6f43cbbebadea3ae9a73cf is 50, key is test_row_0/B:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742326_1502 (size=12051) 2024-11-23T03:23:02,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b22f4befde6f43cbbebadea3ae9a73cf 2024-11-23T03:23:02,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/97c45b5a6dbc4277acc4c135d2b890e8 is 50, key is test_row_0/C:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332242483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742327_1503 (size=12051) 2024-11-23T03:23:02,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/97c45b5a6dbc4277acc4c135d2b890e8 2024-11-23T03:23:02,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/528672d0365d4506891fd42afbb92942 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942 2024-11-23T03:23:02,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332242493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332242494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942, entries=200, sequenceid=130, filesize=14.1 K 2024-11-23T03:23:02,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/b22f4befde6f43cbbebadea3ae9a73cf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf 2024-11-23T03:23:02,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf, entries=150, sequenceid=130, filesize=11.8 K 2024-11-23T03:23:02,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/97c45b5a6dbc4277acc4c135d2b890e8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8 2024-11-23T03:23:02,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8, entries=150, sequenceid=130, filesize=11.8 K 2024-11-23T03:23:02,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 6a5f5cf259ce583546a3c6bfa36ac47c in 214ms, sequenceid=130, compaction requested=true 2024-11-23T03:23:02,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:02,522 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:02,523 DEBUG 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:02,523 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:02,523 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:02,523 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/314c4034a4c14b0a97656a08bd4ca6f7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.7 K 2024-11-23T03:23:02,523 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 314c4034a4c14b0a97656a08bd4ca6f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179304 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:02,524 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5da52673950c4ddbb2ad59c226f1f7be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732332180006 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:02,524 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 528672d0365d4506891fd42afbb92942, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181173 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:02,524 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-11-23T03:23:02,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:02,526 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:02,526 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:02,526 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:02,526 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cf60454ee79e4ba7a1693afa56ff8d8d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.4 K 2024-11-23T03:23:02,527 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cf60454ee79e4ba7a1693afa56ff8d8d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179304 2024-11-23T03:23:02,528 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 49180ed55ac3471e8491adcf3097f38b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732332180006 2024-11-23T03:23:02,528 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b22f4befde6f43cbbebadea3ae9a73cf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181181 2024-11-23T03:23:02,539 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#425 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:02,539 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/b3422d4e4c2540958e6ff25b169e98d7 is 50, key is test_row_0/A:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,551 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:02,552 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/06397bcc81bb4338877f79d58409396e is 50, key is test_row_0/B:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742328_1504 (size=12359) 2024-11-23T03:23:02,598 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/b3422d4e4c2540958e6ff25b169e98d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b3422d4e4c2540958e6ff25b169e98d7 2024-11-23T03:23:02,604 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into b3422d4e4c2540958e6ff25b169e98d7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:02,604 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:02,605 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332182522; duration=0sec 2024-11-23T03:23:02,605 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:02,605 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:02,606 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:02,607 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:02,608 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:02,608 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:02,608 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7629c27b27484e7b8df440e08ea523ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=35.4 K 2024-11-23T03:23:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742329_1505 (size=12359) 2024-11-23T03:23:02,613 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7629c27b27484e7b8df440e08ea523ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732332179304 2024-11-23T03:23:02,613 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d106749dcbc4cf3a8aac99d14e90d14, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732332180006 2024-11-23T03:23:02,614 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97c45b5a6dbc4277acc4c135d2b890e8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181181 2024-11-23T03:23:02,629 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/06397bcc81bb4338877f79d58409396e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/06397bcc81bb4338877f79d58409396e 2024-11-23T03:23:02,638 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:02,638 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 06397bcc81bb4338877f79d58409396e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:02,639 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:02,639 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332182524; duration=0sec 2024-11-23T03:23:02,639 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/9075986907d04777861cc248b221d272 is 50, key is test_row_0/C:col10/1732332181181/Put/seqid=0 2024-11-23T03:23:02,639 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:02,639 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:02,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742330_1506 (size=12359) 2024-11-23T03:23:02,677 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/9075986907d04777861cc248b221d272 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/9075986907d04777861cc248b221d272 2024-11-23T03:23:02,683 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 9075986907d04777861cc248b221d272(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:02,683 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:02,683 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332182524; duration=0sec 2024-11-23T03:23:02,684 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:02,684 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T03:23:02,697 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-23T03:23:02,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:02,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-23T03:23:02,700 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:02,701 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:02,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:02,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:02,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:23:02,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:02,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:02,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:02,704 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:02,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/66da570d51724520890fa0be90a0afcb is 50, key is test_row_0/A:col10/1732332182701/Put/seqid=0 2024-11-23T03:23:02,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332242715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332242717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332242719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742331_1507 (size=14541) 2024-11-23T03:23:02,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/66da570d51724520890fa0be90a0afcb 2024-11-23T03:23:02,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a4153b374a7441e39b23d290f550f6ab is 50, key is test_row_0/B:col10/1732332182701/Put/seqid=0 2024-11-23T03:23:02,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742332_1508 (size=12151) 2024-11-23T03:23:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:02,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a4153b374a7441e39b23d290f550f6ab 2024-11-23T03:23:02,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332242820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332242821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c54d99310cb447a5bc842f52b525275d is 50, key is test_row_0/C:col10/1732332182701/Put/seqid=0 2024-11-23T03:23:02,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:02,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332242828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,854 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:02,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T03:23:02,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:02,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:02,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:02,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:02,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:02,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:02,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742333_1509 (size=12151) 2024-11-23T03:23:02,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c54d99310cb447a5bc842f52b525275d 2024-11-23T03:23:02,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/66da570d51724520890fa0be90a0afcb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb 2024-11-23T03:23:02,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb, entries=200, sequenceid=159, filesize=14.2 K 2024-11-23T03:23:02,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a4153b374a7441e39b23d290f550f6ab as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab 2024-11-23T03:23:02,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab, entries=150, sequenceid=159, filesize=11.9 K 2024-11-23T03:23:02,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c54d99310cb447a5bc842f52b525275d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d 2024-11-23T03:23:02,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d, entries=150, sequenceid=159, filesize=11.9 K 2024-11-23T03:23:02,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6a5f5cf259ce583546a3c6bfa36ac47c in 193ms, sequenceid=159, compaction requested=false 2024-11-23T03:23:02,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:03,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:03,009 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:03,010 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:03,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:03,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec612e5966e84d7e867f18acee6c9fc7 is 50, key is test_row_0/A:col10/1732332182713/Put/seqid=0 2024-11-23T03:23:03,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:03,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:03,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742334_1510 (size=12151) 2024-11-23T03:23:03,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332243098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332243099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332243101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332243210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332243210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332243211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:03,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332243417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332243419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332243419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,468 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec612e5966e84d7e867f18acee6c9fc7 2024-11-23T03:23:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3258694ee8b848a7bccf3bb226235336 is 50, key is test_row_0/B:col10/1732332182713/Put/seqid=0 2024-11-23T03:23:03,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742335_1511 (size=12151) 2024-11-23T03:23:03,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332243721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332243725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332243726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:03,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:03,902 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3258694ee8b848a7bccf3bb226235336 2024-11-23T03:23:03,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/a7a0eede18b84b898877a6a512a0b060 is 50, key is test_row_0/C:col10/1732332182713/Put/seqid=0 2024-11-23T03:23:03,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742336_1512 (size=12151) 2024-11-23T03:23:03,977 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/a7a0eede18b84b898877a6a512a0b060 2024-11-23T03:23:03,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec612e5966e84d7e867f18acee6c9fc7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7 2024-11-23T03:23:03,992 INFO 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:23:03,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3258694ee8b848a7bccf3bb226235336 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336 2024-11-23T03:23:04,001 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:23:04,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/a7a0eede18b84b898877a6a512a0b060 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060 2024-11-23T03:23:04,015 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T03:23:04,016 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1005ms, sequenceid=169, compaction requested=true 2024-11-23T03:23:04,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:04,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:04,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-23T03:23:04,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-23T03:23:04,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-23T03:23:04,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3160 sec 2024-11-23T03:23:04,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.3200 sec 2024-11-23T03:23:04,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:04,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/9f0ac177ae3f4bc9a0f9db4af35070af is 50, key is test_row_0/A:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332244245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742337_1513 (size=14541) 2024-11-23T03:23:04,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332244248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/9f0ac177ae3f4bc9a0f9db4af35070af 2024-11-23T03:23:04,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332244248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/d64d98a9bda241e1b8ad02b3fbe0664c is 50, key is test_row_0/B:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742338_1514 (size=12151) 2024-11-23T03:23:04,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/d64d98a9bda241e1b8ad02b3fbe0664c 2024-11-23T03:23:04,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332244352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/41b63e648417486ab18a4c5d319239aa is 50, key is test_row_0/C:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332244374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332244375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742339_1515 (size=12151) 2024-11-23T03:23:04,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/41b63e648417486ab18a4c5d319239aa 2024-11-23T03:23:04,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/9f0ac177ae3f4bc9a0f9db4af35070af as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af 2024-11-23T03:23:04,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af, entries=200, sequenceid=196, filesize=14.2 K 2024-11-23T03:23:04,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/d64d98a9bda241e1b8ad02b3fbe0664c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c 2024-11-23T03:23:04,410 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T03:23:04,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/41b63e648417486ab18a4c5d319239aa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa 2024-11-23T03:23:04,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T03:23:04,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6a5f5cf259ce583546a3c6bfa36ac47c in 184ms, sequenceid=196, compaction requested=true 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:04,419 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:04,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:23:04,419 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:04,420 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53592 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:04,420 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] 
regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:04,420 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:04,420 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b3422d4e4c2540958e6ff25b169e98d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=52.3 K 2024-11-23T03:23:04,421 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:04,421 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3422d4e4c2540958e6ff25b169e98d7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181181 2024-11-23T03:23:04,421 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:04,421 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:04,421 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/06397bcc81bb4338877f79d58409396e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=47.7 K 2024-11-23T03:23:04,421 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66da570d51724520890fa0be90a0afcb, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332182359 2024-11-23T03:23:04,422 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 06397bcc81bb4338877f79d58409396e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181181 2024-11-23T03:23:04,422 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec612e5966e84d7e867f18acee6c9fc7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732332182709 2024-11-23T03:23:04,422 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a4153b374a7441e39b23d290f550f6ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332182380 2024-11-23T03:23:04,422 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f0ac177ae3f4bc9a0f9db4af35070af, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183088 2024-11-23T03:23:04,422 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3258694ee8b848a7bccf3bb226235336, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732332182709 2024-11-23T03:23:04,423 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d64d98a9bda241e1b8ad02b3fbe0664c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183096 2024-11-23T03:23:04,447 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#437 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:04,447 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/017ded80f3ef4523a15a4c73804c5112 is 50, key is test_row_0/A:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,461 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:04,462 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/97c3e4c42a874195aa6ef6a54f406453 is 50, key is test_row_0/B:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742340_1516 (size=12595) 2024-11-23T03:23:04,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742341_1517 (size=12595) 2024-11-23T03:23:04,539 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/97c3e4c42a874195aa6ef6a54f406453 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/97c3e4c42a874195aa6ef6a54f406453 2024-11-23T03:23:04,543 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 97c3e4c42a874195aa6ef6a54f406453(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:04,544 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:04,544 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=12, startTime=1732332184419; duration=0sec 2024-11-23T03:23:04,544 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:04,544 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:04,544 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:04,546 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:04,546 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:04,546 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:04,546 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/9075986907d04777861cc248b221d272, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=47.7 K 2024-11-23T03:23:04,548 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9075986907d04777861cc248b221d272, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732332181181 2024-11-23T03:23:04,548 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c54d99310cb447a5bc842f52b525275d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732332182380 2024-11-23T03:23:04,548 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a7a0eede18b84b898877a6a512a0b060, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=169, earliestPutTs=1732332182709 2024-11-23T03:23:04,549 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 41b63e648417486ab18a4c5d319239aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183096 2024-11-23T03:23:04,558 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:04,559 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/18ebbbe4e16442a0bbf75b5b392db4b2 is 50, key is test_row_0/C:col10/1732332183099/Put/seqid=0 2024-11-23T03:23:04,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:04,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742342_1518 (size=12595) 2024-11-23T03:23:04,597 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/18ebbbe4e16442a0bbf75b5b392db4b2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/18ebbbe4e16442a0bbf75b5b392db4b2 2024-11-23T03:23:04,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/c6d5f835b02f4d158d1a71a4f6d61269 is 50, key is test_row_0/A:col10/1732332184563/Put/seqid=0 2024-11-23T03:23:04,605 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 18ebbbe4e16442a0bbf75b5b392db4b2(size=12.3 
K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:04,605 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:04,605 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=12, startTime=1732332184419; duration=0sec 2024-11-23T03:23:04,605 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:04,605 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742343_1519 (size=12147) 2024-11-23T03:23:04,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332244645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332244646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332244649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332244750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332244754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332244760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T03:23:04,807 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-23T03:23:04,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:04,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-23T03:23:04,810 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:04,811 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:04,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:04,916 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/017ded80f3ef4523a15a4c73804c5112 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/017ded80f3ef4523a15a4c73804c5112 2024-11-23T03:23:04,921 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 
017ded80f3ef4523a15a4c73804c5112(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:04,921 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:04,921 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=12, startTime=1732332184419; duration=0sec 2024-11-23T03:23:04,921 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:04,921 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:04,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332244955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,963 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:04,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:04,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:04,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:04,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:04,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:04,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:04,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332244960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:04,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:04,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332244968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/c6d5f835b02f4d158d1a71a4f6d61269 2024-11-23T03:23:05,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/88ec852c218748b6b06b48281dbb384e is 50, key is test_row_0/B:col10/1732332184563/Put/seqid=0 2024-11-23T03:23:05,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742344_1520 (size=9757) 2024-11-23T03:23:05,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/88ec852c218748b6b06b48281dbb384e 2024-11-23T03:23:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:05,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:05,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/2a2f3f19b21049aca85a7d6edd56af89 is 50, key is test_row_0/C:col10/1732332184563/Put/seqid=0 2024-11-23T03:23:05,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742345_1521 (size=9757) 2024-11-23T03:23:05,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/2a2f3f19b21049aca85a7d6edd56af89 2024-11-23T03:23:05,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/c6d5f835b02f4d158d1a71a4f6d61269 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269 2024-11-23T03:23:05,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269, entries=150, sequenceid=208, filesize=11.9 K 2024-11-23T03:23:05,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/88ec852c218748b6b06b48281dbb384e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e 2024-11-23T03:23:05,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e, entries=100, sequenceid=208, filesize=9.5 K 2024-11-23T03:23:05,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/2a2f3f19b21049aca85a7d6edd56af89 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89 2024-11-23T03:23:05,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89, entries=100, sequenceid=208, filesize=9.5 K 2024-11-23T03:23:05,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a5f5cf259ce583546a3c6bfa36ac47c in 623ms, sequenceid=208, compaction requested=false 2024-11-23T03:23:05,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:05,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 
2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:05,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:05,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:05,273 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1f7fa71a890d4e10bd0f4d9b928bfe90 is 50, key is test_row_0/A:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:05,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742346_1522 (size=14541) 2024-11-23T03:23:05,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1f7fa71a890d4e10bd0f4d9b928bfe90 2024-11-23T03:23:05,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332245288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332245292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332245296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3e6f9abe4dd84a63bd81e51f7924b837 is 50, key is test_row_0/B:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:05,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742347_1523 (size=12151) 2024-11-23T03:23:05,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332245399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332245401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332245408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:05,427 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332245607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332245610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332245613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332245706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,724 DEBUG [Thread-2121 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:05,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3e6f9abe4dd84a63bd81e51f7924b837 2024-11-23T03:23:05,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332245742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,749 DEBUG [Thread-2123 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8218 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., hostname=0d51875c74df,34141,1732332039937, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:05,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/e8bb6686a45a4415a3f2a2e6652abde1 is 50, key is test_row_0/C:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:05,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742348_1524 (size=12151) 2024-11-23T03:23:05,886 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:05,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:05,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:05,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:05,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:05,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332245915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332245918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:05,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:05,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332245921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:06,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:06,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:06,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:06,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:06,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:06,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:06,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:06,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/e8bb6686a45a4415a3f2a2e6652abde1 2024-11-23T03:23:06,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/1f7fa71a890d4e10bd0f4d9b928bfe90 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90 2024-11-23T03:23:06,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90, entries=200, sequenceid=236, filesize=14.2 K 2024-11-23T03:23:06,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3e6f9abe4dd84a63bd81e51f7924b837 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837 2024-11-23T03:23:06,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837, entries=150, sequenceid=236, filesize=11.9 K 2024-11-23T03:23:06,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/e8bb6686a45a4415a3f2a2e6652abde1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1 2024-11-23T03:23:06,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1, entries=150, sequenceid=236, filesize=11.9 K 2024-11-23T03:23:06,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6a5f5cf259ce583546a3c6bfa36ac47c in 965ms, sequenceid=236, compaction requested=true 2024-11-23T03:23:06,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:06,234 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:06,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:23:06,234 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:06,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:06,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction 
(all files) 2024-11-23T03:23:06,235 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:06,235 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/017ded80f3ef4523a15a4c73804c5112, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=38.4 K 2024-11-23T03:23:06,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:06,235 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:06,236 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:06,236 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/97c3e4c42a874195aa6ef6a54f406453, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=33.7 K 2024-11-23T03:23:06,236 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 017ded80f3ef4523a15a4c73804c5112, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183096 2024-11-23T03:23:06,236 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 97c3e4c42a874195aa6ef6a54f406453, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183096 2024-11-23T03:23:06,236 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 88ec852c218748b6b06b48281dbb384e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732332184558 2024-11-23T03:23:06,237 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6d5f835b02f4d158d1a71a4f6d61269, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732332184240 2024-11-23T03:23:06,237 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e6f9abe4dd84a63bd81e51f7924b837, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:06,237 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f7fa71a890d4e10bd0f4d9b928bfe90, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:06,256 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#446 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:06,256 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/a8fdd84621d240839e6cd1a03ca4fd68 is 50, key is test_row_0/A:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:06,270 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:06,270 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/df2123923e1a46f69e28ec7cbe9c831e is 50, key is test_row_0/B:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:06,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742350_1526 (size=12697) 2024-11-23T03:23:06,306 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/df2123923e1a46f69e28ec7cbe9c831e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/df2123923e1a46f69e28ec7cbe9c831e 2024-11-23T03:23:06,312 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into df2123923e1a46f69e28ec7cbe9c831e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:06,312 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:06,312 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332186234; duration=0sec 2024-11-23T03:23:06,312 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:06,312 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:06,313 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:06,317 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:06,317 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:06,317 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:06,317 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/18ebbbe4e16442a0bbf75b5b392db4b2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=33.7 K 2024-11-23T03:23:06,319 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 18ebbbe4e16442a0bbf75b5b392db4b2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732332183096 2024-11-23T03:23:06,319 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a2f3f19b21049aca85a7d6edd56af89, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732332184558 2024-11-23T03:23:06,320 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e8bb6686a45a4415a3f2a2e6652abde1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:06,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742349_1525 (size=12697) 2024-11-23T03:23:06,334 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/a8fdd84621d240839e6cd1a03ca4fd68 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/a8fdd84621d240839e6cd1a03ca4fd68 2024-11-23T03:23:06,339 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into a8fdd84621d240839e6cd1a03ca4fd68(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:06,339 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:06,339 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332186233; duration=0sec 2024-11-23T03:23:06,340 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:06,340 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:06,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T03:23:06,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:06,348 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:06,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:06,356 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#448 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:06,357 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/0c706ced1a1443a89ad81c572bdd52fc is 50, key is test_row_0/C:col10/1732332184647/Put/seqid=0 2024-11-23T03:23:06,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2c46abb20396412b88fb77b8bc1356e9 is 50, key is test_row_0/A:col10/1732332185289/Put/seqid=0 2024-11-23T03:23:06,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742351_1527 (size=12697) 2024-11-23T03:23:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:06,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:06,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742352_1528 (size=12151) 2024-11-23T03:23:06,424 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2c46abb20396412b88fb77b8bc1356e9 2024-11-23T03:23:06,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/e13b801bfc844d1aadc6f2f6315a2cd3 is 50, key is test_row_0/B:col10/1732332185289/Put/seqid=0 2024-11-23T03:23:06,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742353_1529 (size=12151) 2024-11-23T03:23:06,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332246481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332246487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332246490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332246593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332246597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332246597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332246800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332246803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332246804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:06,813 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/0c706ced1a1443a89ad81c572bdd52fc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/0c706ced1a1443a89ad81c572bdd52fc 2024-11-23T03:23:06,819 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 0c706ced1a1443a89ad81c572bdd52fc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:06,819 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:06,819 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332186234; duration=0sec 2024-11-23T03:23:06,819 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:06,819 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:06,872 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/e13b801bfc844d1aadc6f2f6315a2cd3 2024-11-23T03:23:06,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c30ac18ac5954c65be0313f8180e326d is 50, key is test_row_0/C:col10/1732332185289/Put/seqid=0 2024-11-23T03:23:06,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742354_1530 (size=12151) 2024-11-23T03:23:06,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c30ac18ac5954c65be0313f8180e326d 2024-11-23T03:23:06,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:06,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/2c46abb20396412b88fb77b8bc1356e9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9 2024-11-23T03:23:06,927 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:23:06,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 
{event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/e13b801bfc844d1aadc6f2f6315a2cd3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3 2024-11-23T03:23:06,932 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:23:06,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c30ac18ac5954c65be0313f8180e326d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d 2024-11-23T03:23:06,939 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T03:23:06,940 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 6a5f5cf259ce583546a3c6bfa36ac47c in 592ms, sequenceid=248, compaction requested=false 2024-11-23T03:23:06,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:06,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:06,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-23T03:23:06,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-23T03:23:06,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-23T03:23:06,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1300 sec 2024-11-23T03:23:06,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.1350 sec 2024-11-23T03:23:07,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:07,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:07,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fe5d07ab92804a3bafb0cbd116bd6c52 is 50, key is test_row_0/A:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332247130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332247132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332247132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742355_1531 (size=12301) 2024-11-23T03:23:07,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332247235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332247242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332247242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332247442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332247447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332247447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fe5d07ab92804a3bafb0cbd116bd6c52 2024-11-23T03:23:07,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/535db1960b554f2390558ae3147fe054 is 50, key is test_row_0/B:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742356_1532 (size=12301) 2024-11-23T03:23:07,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/535db1960b554f2390558ae3147fe054 2024-11-23T03:23:07,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/96442fd556f84b428bfafe5024eaba78 is 50, key is test_row_0/C:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742357_1533 (size=12301) 2024-11-23T03:23:07,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/96442fd556f84b428bfafe5024eaba78 2024-11-23T03:23:07,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fe5d07ab92804a3bafb0cbd116bd6c52 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52 2024-11-23T03:23:07,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T03:23:07,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/535db1960b554f2390558ae3147fe054 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054 2024-11-23T03:23:07,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T03:23:07,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/96442fd556f84b428bfafe5024eaba78 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78 2024-11-23T03:23:07,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T03:23:07,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6a5f5cf259ce583546a3c6bfa36ac47c in 586ms, sequenceid=276, compaction requested=true 2024-11-23T03:23:07,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:07,703 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:07,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:23:07,703 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:07,714 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:07,714 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:07,714 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:07,714 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:07,714 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:07,714 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/a8fdd84621d240839e6cd1a03ca4fd68, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.3 K 2024-11-23T03:23:07,714 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:07,714 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/df2123923e1a46f69e28ec7cbe9c831e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.3 K 2024-11-23T03:23:07,715 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting df2123923e1a46f69e28ec7cbe9c831e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:07,715 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a8fdd84621d240839e6cd1a03ca4fd68, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:07,716 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e13b801bfc844d1aadc6f2f6315a2cd3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332185280 2024-11-23T03:23:07,716 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 535db1960b554f2390558ae3147fe054, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:07,720 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c46abb20396412b88fb77b8bc1356e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332185280 2024-11-23T03:23:07,720 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fe5d07ab92804a3bafb0cbd116bd6c52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:07,730 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:07,730 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/944c395f1def4949abf0efb164f82b9d is 50, key is test_row_0/B:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,746 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#456 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:07,746 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d5a8f74774b647ed857e2f9a5a738285 is 50, key is test_row_0/A:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:07,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:23:07,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:07,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742358_1534 (size=12949) 2024-11-23T03:23:07,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742359_1535 (size=12949) 2024-11-23T03:23:07,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ce20ad56aba94afbbd617fdd80005004 is 50, key is test_row_0/A:col10/1732332187130/Put/seqid=0 2024-11-23T03:23:07,777 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d5a8f74774b647ed857e2f9a5a738285 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d5a8f74774b647ed857e2f9a5a738285 2024-11-23T03:23:07,784 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into d5a8f74774b647ed857e2f9a5a738285(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:07,784 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:07,784 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332187703; duration=0sec 2024-11-23T03:23:07,784 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:07,784 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:07,784 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:07,786 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:07,786 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:07,786 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:07,786 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/0c706ced1a1443a89ad81c572bdd52fc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.3 K 2024-11-23T03:23:07,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c706ced1a1443a89ad81c572bdd52fc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332184643 2024-11-23T03:23:07,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c30ac18ac5954c65be0313f8180e326d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732332185280 2024-11-23T03:23:07,787 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 96442fd556f84b428bfafe5024eaba78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:07,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 
is added to blk_1073742360_1536 (size=12301) 2024-11-23T03:23:07,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ce20ad56aba94afbbd617fdd80005004 2024-11-23T03:23:07,804 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:07,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/05bfaba224c14e8d86daf94353139d86 is 50, key is test_row_0/C:col10/1732332186486/Put/seqid=0 2024-11-23T03:23:07,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a9453d8de9e240968211c31680e8c642 is 50, key is test_row_0/B:col10/1732332187130/Put/seqid=0 2024-11-23T03:23:07,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332247840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742361_1537 (size=12949) 2024-11-23T03:23:07,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332247843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332247844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,859 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/05bfaba224c14e8d86daf94353139d86 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/05bfaba224c14e8d86daf94353139d86 2024-11-23T03:23:07,864 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 05bfaba224c14e8d86daf94353139d86(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:07,864 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:07,864 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332187703; duration=0sec 2024-11-23T03:23:07,864 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:07,864 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:07,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742362_1538 (size=12301) 2024-11-23T03:23:07,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a9453d8de9e240968211c31680e8c642 2024-11-23T03:23:07,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/91a00fb6369c4afe9fb52a009b3fa901 is 50, key is test_row_0/C:col10/1732332187130/Put/seqid=0 2024-11-23T03:23:07,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742363_1539 (size=12301) 2024-11-23T03:23:07,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/91a00fb6369c4afe9fb52a009b3fa901 2024-11-23T03:23:07,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ce20ad56aba94afbbd617fdd80005004 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004 2024-11-23T03:23:07,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332247951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T03:23:07,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/a9453d8de9e240968211c31680e8c642 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642 2024-11-23T03:23:07,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332247955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332247956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:07,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T03:23:07,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/91a00fb6369c4afe9fb52a009b3fa901 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901 2024-11-23T03:23:07,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T03:23:07,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a5f5cf259ce583546a3c6bfa36ac47c in 216ms, sequenceid=287, compaction requested=false 2024-11-23T03:23:07,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:08,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T03:23:08,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): 
Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:08,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8824f3cf1bda4ee1a25cb1fa2ed8806b is 50, key is test_row_0/A:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:08,170 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/944c395f1def4949abf0efb164f82b9d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/944c395f1def4949abf0efb164f82b9d 2024-11-23T03:23:08,176 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 944c395f1def4949abf0efb164f82b9d(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:08,177 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:08,177 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332187703; duration=0sec 2024-11-23T03:23:08,177 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:08,177 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:08,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332248177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332248178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332248184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742364_1540 (size=14741) 2024-11-23T03:23:08,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8824f3cf1bda4ee1a25cb1fa2ed8806b 2024-11-23T03:23:08,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/006aec99cdce4483ae0f9c2232713e05 is 50, key is test_row_0/B:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:08,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742365_1541 (size=12301) 2024-11-23T03:23:08,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T03:23:08,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332248286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332248286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332248293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332248492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332248493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332248495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/006aec99cdce4483ae0f9c2232713e05 2024-11-23T03:23:08,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c8c10b9e583648499a18be08d1977b46 is 50, key is test_row_0/C:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:08,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742366_1542 (size=12301) 2024-11-23T03:23:08,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332248798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332248800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332248802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T03:23:08,916 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-23T03:23:08,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:08,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-23T03:23:08,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T03:23:08,920 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:08,922 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:08,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T03:23:09,074 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T03:23:09,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:09,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:09,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c8c10b9e583648499a18be08d1977b46 2024-11-23T03:23:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8824f3cf1bda4ee1a25cb1fa2ed8806b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b 2024-11-23T03:23:09,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b, entries=200, sequenceid=315, filesize=14.4 K 2024-11-23T03:23:09,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/006aec99cdce4483ae0f9c2232713e05 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05 2024-11-23T03:23:09,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05, entries=150, sequenceid=315, filesize=12.0 K 2024-11-23T03:23:09,141 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-23T03:23:09,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/c8c10b9e583648499a18be08d1977b46 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46 2024-11-23T03:23:09,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46, entries=150, sequenceid=315, filesize=12.0 K 2024-11-23T03:23:09,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6a5f5cf259ce583546a3c6bfa36ac47c in 993ms, sequenceid=315, compaction requested=true 2024-11-23T03:23:09,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:09,153 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:09,154 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:09,154 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:09,154 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:09,155 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d5a8f74774b647ed857e2f9a5a738285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=39.1 K 2024-11-23T03:23:09,155 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5a8f74774b647ed857e2f9a5a738285, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:09,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:09,156 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce20ad56aba94afbbd617fdd80005004, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732332187130 2024-11-23T03:23:09,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:09,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:09,156 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:09,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:09,157 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8824f3cf1bda4ee1a25cb1fa2ed8806b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:09,158 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:09,158 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is 
initiating minor compaction (all files) 2024-11-23T03:23:09,158 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,158 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/944c395f1def4949abf0efb164f82b9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.7 K 2024-11-23T03:23:09,160 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 944c395f1def4949abf0efb164f82b9d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:09,160 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a9453d8de9e240968211c31680e8c642, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732332187130 2024-11-23T03:23:09,161 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 006aec99cdce4483ae0f9c2232713e05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:09,182 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:09,183 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/9f9ecb3da1fb44e4b16dcc946e15dfd1 is 50, key is test_row_0/A:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:09,197 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:09,197 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3e28a2978cf843fb91fd0154fe919617 is 50, key is test_row_0/B:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:09,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T03:23:09,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,229 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:09,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742367_1543 (size=13051) 2024-11-23T03:23:09,263 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/9f9ecb3da1fb44e4b16dcc946e15dfd1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f9ecb3da1fb44e4b16dcc946e15dfd1 2024-11-23T03:23:09,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742368_1544 (size=13051) 2024-11-23T03:23:09,270 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 9f9ecb3da1fb44e4b16dcc946e15dfd1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:09,270 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:09,270 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332189153; duration=0sec 2024-11-23T03:23:09,270 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:09,270 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:09,270 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:09,271 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:09,271 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:09,271 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:09,271 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/05bfaba224c14e8d86daf94353139d86, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.7 K 2024-11-23T03:23:09,271 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05bfaba224c14e8d86daf94353139d86, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332186486 2024-11-23T03:23:09,272 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91a00fb6369c4afe9fb52a009b3fa901, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732332187130 2024-11-23T03:23:09,272 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8c10b9e583648499a18be08d1977b46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:09,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/86d4c05f004b49cba5a048993c5923d6 is 50, key is test_row_1/A:col10/1732332188181/Put/seqid=0 2024-11-23T03:23:09,299 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#467 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:09,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/da29af4faea248ca878dff5a32f3b14a is 50, key is test_row_0/C:col10/1732332188157/Put/seqid=0 2024-11-23T03:23:09,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:09,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:09,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742369_1545 (size=9857) 2024-11-23T03:23:09,319 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/86d4c05f004b49cba5a048993c5923d6 2024-11-23T03:23:09,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6341df4adef349ff8af1f5b6ab68e936 is 50, key is test_row_1/B:col10/1732332188181/Put/seqid=0 2024-11-23T03:23:09,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742370_1546 (size=13051) 2024-11-23T03:23:09,364 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/da29af4faea248ca878dff5a32f3b14a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/da29af4faea248ca878dff5a32f3b14a 2024-11-23T03:23:09,369 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into da29af4faea248ca878dff5a32f3b14a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:09,369 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:09,369 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332189157; duration=0sec 2024-11-23T03:23:09,369 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:09,369 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:09,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742371_1547 (size=9857) 2024-11-23T03:23:09,379 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6341df4adef349ff8af1f5b6ab68e936 2024-11-23T03:23:09,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f32f6be15c594023b68ef0b8585f3054 is 50, key is test_row_1/C:col10/1732332188181/Put/seqid=0 2024-11-23T03:23:09,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332249396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332249402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332249403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742372_1548 (size=9857) 2024-11-23T03:23:09,433 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f32f6be15c594023b68ef0b8585f3054 2024-11-23T03:23:09,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/86d4c05f004b49cba5a048993c5923d6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6 2024-11-23T03:23:09,445 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6, entries=100, sequenceid=326, filesize=9.6 K 2024-11-23T03:23:09,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6341df4adef349ff8af1f5b6ab68e936 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936 2024-11-23T03:23:09,462 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 
{event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936, entries=100, sequenceid=326, filesize=9.6 K 2024-11-23T03:23:09,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/f32f6be15c594023b68ef0b8585f3054 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054 2024-11-23T03:23:09,472 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054, entries=100, sequenceid=326, filesize=9.6 K 2024-11-23T03:23:09,473 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 6a5f5cf259ce583546a3c6bfa36ac47c in 244ms, sequenceid=326, compaction requested=false 2024-11-23T03:23:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-23T03:23:09,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-23T03:23:09,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-23T03:23:09,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 553 msec 2024-11-23T03:23:09,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 559 msec 2024-11-23T03:23:09,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:09,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T03:23:09,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:09,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:09,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:09,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:09,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e97c6a31469547c5bb516013c4259aa0 is 50, key is test_row_0/A:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T03:23:09,522 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-23T03:23:09,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-23T03:23:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T03:23:09,525 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:09,526 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:09,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332249520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332249524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332249525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742373_1549 (size=14741) 2024-11-23T03:23:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T03:23:09,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332249627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332249635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332249635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,673 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3e28a2978cf843fb91fd0154fe919617 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e28a2978cf843fb91fd0154fe919617 2024-11-23T03:23:09,677 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T03:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,681 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 3e28a2978cf843fb91fd0154fe919617(size=12.7 K), total size for store is 22.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:09,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:09,681 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332189156; duration=0sec 2024-11-23T03:23:09,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:09,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:09,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T03:23:09,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T03:23:09,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:09,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:09,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332249833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332249840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:09,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332249841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e97c6a31469547c5bb516013c4259aa0 2024-11-23T03:23:09,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/43c3a73c89354394b2699439305b1c1f is 50, key is test_row_0/B:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:09,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742374_1550 (size=12301) 2024-11-23T03:23:09,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/43c3a73c89354394b2699439305b1c1f 2024-11-23T03:23:09,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:09,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T03:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:09,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/80da2f6cf3724f7c84745ae961e7d64a is 50, key is test_row_0/C:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:10,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742375_1551 (size=12301) 2024-11-23T03:23:10,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/80da2f6cf3724f7c84745ae961e7d64a 2024-11-23T03:23:10,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/e97c6a31469547c5bb516013c4259aa0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0 2024-11-23T03:23:10,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0, entries=200, sequenceid=356, filesize=14.4 K 2024-11-23T03:23:10,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/43c3a73c89354394b2699439305b1c1f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f 2024-11-23T03:23:10,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:23:10,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/80da2f6cf3724f7c84745ae961e7d64a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a 2024-11-23T03:23:10,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:23:10,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6a5f5cf259ce583546a3c6bfa36ac47c in 535ms, sequenceid=356, compaction requested=true 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,047 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,047 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:10,051 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37649 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,051 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,051 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:10,051 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:10,051 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,051 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e28a2978cf843fb91fd0154fe919617, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=34.4 K 2024-11-23T03:23:10,052 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:10,052 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f9ecb3da1fb44e4b16dcc946e15dfd1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.8 K 2024-11-23T03:23:10,053 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e28a2978cf843fb91fd0154fe919617, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:10,053 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f9ecb3da1fb44e4b16dcc946e15dfd1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:10,053 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6341df4adef349ff8af1f5b6ab68e936, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332188171 2024-11-23T03:23:10,054 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86d4c05f004b49cba5a048993c5923d6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332188171 2024-11-23T03:23:10,054 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 43c3a73c89354394b2699439305b1c1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,054 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e97c6a31469547c5bb516013c4259aa0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,077 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,078 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/84fe6d243e924bad83ac837262c61c04 is 50, key is test_row_0/A:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:10,085 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,086 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/ce98949cf763421995a47487d1c2b946 is 50, key is test_row_0/B:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:10,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T03:23:10,138 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742376_1552 (size=13153) 2024-11-23T03:23:10,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T03:23:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,139 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:10,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,147 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/84fe6d243e924bad83ac837262c61c04 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/84fe6d243e924bad83ac837262c61c04 2024-11-23T03:23:10,152 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 84fe6d243e924bad83ac837262c61c04(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,152 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,152 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332190047; duration=0sec 2024-11-23T03:23:10,152 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:10,152 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:10,152 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,153 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,153 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:10,153 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:10,153 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/da29af4faea248ca878dff5a32f3b14a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=34.4 K 2024-11-23T03:23:10,153 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting da29af4faea248ca878dff5a32f3b14a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732332187832 2024-11-23T03:23:10,154 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting f32f6be15c594023b68ef0b8585f3054, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732332188171 2024-11-23T03:23:10,154 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80da2f6cf3724f7c84745ae961e7d64a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:10,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:10,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742377_1553 (size=13153) 2024-11-23T03:23:10,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d6c98aad94114dddaf846da79555c1c6 is 50, key is test_row_0/A:col10/1732332189522/Put/seqid=0 2024-11-23T03:23:10,167 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#476 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,167 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/d840d6d5800e47ee866aab04f702ef7d is 50, key is test_row_0/C:col10/1732332189387/Put/seqid=0 2024-11-23T03:23:10,168 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/ce98949cf763421995a47487d1c2b946 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ce98949cf763421995a47487d1c2b946 2024-11-23T03:23:10,172 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into ce98949cf763421995a47487d1c2b946(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,173 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,173 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332190047; duration=0sec 2024-11-23T03:23:10,173 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,173 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:10,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742378_1554 (size=12301) 2024-11-23T03:23:10,234 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d6c98aad94114dddaf846da79555c1c6 2024-11-23T03:23:10,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/cd4bc5bcedb24483a6253f671f0cc476 is 50, key is test_row_0/B:col10/1732332189522/Put/seqid=0 2024-11-23T03:23:10,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742379_1555 (size=13153) 2024-11-23T03:23:10,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to 
blk_1073742380_1556 (size=12301) 2024-11-23T03:23:10,301 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/cd4bc5bcedb24483a6253f671f0cc476 2024-11-23T03:23:10,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332250297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/7d194bd6228e4ad68a4c6beb36d16fef is 50, key is test_row_0/C:col10/1732332189522/Put/seqid=0 2024-11-23T03:23:10,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332250310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332250314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742381_1557 (size=12301) 2024-11-23T03:23:10,353 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/7d194bd6228e4ad68a4c6beb36d16fef 2024-11-23T03:23:10,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d6c98aad94114dddaf846da79555c1c6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6 2024-11-23T03:23:10,363 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6, entries=150, sequenceid=365, filesize=12.0 K 2024-11-23T03:23:10,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/cd4bc5bcedb24483a6253f671f0cc476 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476 2024-11-23T03:23:10,370 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476, entries=150, sequenceid=365, filesize=12.0 K 2024-11-23T03:23:10,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 
{event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/7d194bd6228e4ad68a4c6beb36d16fef as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef 2024-11-23T03:23:10,375 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef, entries=150, sequenceid=365, filesize=12.0 K 2024-11-23T03:23:10,376 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 6a5f5cf259ce583546a3c6bfa36ac47c in 238ms, sequenceid=365, compaction requested=false 2024-11-23T03:23:10,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-23T03:23:10,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-23T03:23:10,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-23T03:23:10,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 852 msec 2024-11-23T03:23:10,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 855 msec 2024-11-23T03:23:10,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:10,423 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:10,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332250428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332250431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fb7ae969d6c34ab2bc2fb7993d8cd901 is 50, key is test_row_0/A:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332250434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742382_1558 (size=14741) 2024-11-23T03:23:10,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fb7ae969d6c34ab2bc2fb7993d8cd901 2024-11-23T03:23:10,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/17c609c531ac402f9f007a68d1bfcd82 is 50, key is test_row_0/B:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332250538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332250538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332250544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742383_1559 (size=12301) 2024-11-23T03:23:10,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/17c609c531ac402f9f007a68d1bfcd82 2024-11-23T03:23:10,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bba28ec2c6bd47d9bc9b29476924e6e9 is 50, key is test_row_0/C:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T03:23:10,629 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-23T03:23:10,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-23T03:23:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:10,632 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:10,633 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:10,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:10,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742384_1560 (size=12301) 2024-11-23T03:23:10,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bba28ec2c6bd47d9bc9b29476924e6e9 2024-11-23T03:23:10,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fb7ae969d6c34ab2bc2fb7993d8cd901 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901 2024-11-23T03:23:10,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901, entries=200, sequenceid=396, filesize=14.4 K 2024-11-23T03:23:10,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/17c609c531ac402f9f007a68d1bfcd82 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82 2024-11-23T03:23:10,662 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/d840d6d5800e47ee866aab04f702ef7d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/d840d6d5800e47ee866aab04f702ef7d 2024-11-23T03:23:10,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82, entries=150, sequenceid=396, filesize=12.0 K 2024-11-23T03:23:10,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/bba28ec2c6bd47d9bc9b29476924e6e9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9 2024-11-23T03:23:10,669 INFO 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into d840d6d5800e47ee866aab04f702ef7d(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,669 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332190047; duration=0sec 2024-11-23T03:23:10,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,669 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:10,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9, entries=150, sequenceid=396, filesize=12.0 K 2024-11-23T03:23:10,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 6a5f5cf259ce583546a3c6bfa36ac47c in 251ms, sequenceid=396, compaction requested=true 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-23T03:23:10,674 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,674 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,675 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,675 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:10,675 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,675 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/84fe6d243e924bad83ac837262c61c04, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=39.3 K 2024-11-23T03:23:10,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:10,676 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:10,676 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/d840d6d5800e47ee866aab04f702ef7d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.9 K 2024-11-23T03:23:10,676 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 84fe6d243e924bad83ac837262c61c04, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,676 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d840d6d5800e47ee866aab04f702ef7d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,677 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d6c98aad94114dddaf846da79555c1c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732332189518 2024-11-23T03:23:10,677 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d194bd6228e4ad68a4c6beb36d16fef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732332189518 2024-11-23T03:23:10,677 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fb7ae969d6c34ab2bc2fb7993d8cd901, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:10,678 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting bba28ec2c6bd47d9bc9b29476924e6e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:10,688 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#482 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,688 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/4e4ec4b6ce904ff4ae7166ff676db9df is 50, key is test_row_0/A:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,691 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#483 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,691 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/a0a3f0c3cd59452fb72b96affef4ec9f is 50, key is test_row_0/C:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742385_1561 (size=13255) 2024-11-23T03:23:10,731 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/4e4ec4b6ce904ff4ae7166ff676db9df as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/4e4ec4b6ce904ff4ae7166ff676db9df 2024-11-23T03:23:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:10,736 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 4e4ec4b6ce904ff4ae7166ff676db9df(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,736 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,736 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332190674; duration=0sec 2024-11-23T03:23:10,736 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:10,736 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:10,736 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:10,737 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:10,737 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:10,737 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:10,738 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ce98949cf763421995a47487d1c2b946, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=36.9 K 2024-11-23T03:23:10,738 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ce98949cf763421995a47487d1c2b946, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332189357 2024-11-23T03:23:10,738 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting cd4bc5bcedb24483a6253f671f0cc476, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732332189518 2024-11-23T03:23:10,738 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 17c609c531ac402f9f007a68d1bfcd82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:10,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:10,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:10,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:10,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742386_1562 (size=13255) 2024-11-23T03:23:10,767 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/a0a3f0c3cd59452fb72b96affef4ec9f as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a0a3f0c3cd59452fb72b96affef4ec9f 2024-11-23T03:23:10,771 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#484 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:10,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fedc6c895e90403490b66861465e760a is 50, key is test_row_0/A:col10/1732332190759/Put/seqid=0 2024-11-23T03:23:10,772 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/086ad1d0e3874d9a8134ae63ea419b08 is 50, key is test_row_0/B:col10/1732332190421/Put/seqid=0 2024-11-23T03:23:10,775 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into a0a3f0c3cd59452fb72b96affef4ec9f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,775 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,775 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332190674; duration=0sec 2024-11-23T03:23:10,775 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,775 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:10,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:10,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:10,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742387_1563 (size=19621) 2024-11-23T03:23:10,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fedc6c895e90403490b66861465e760a 2024-11-23T03:23:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742388_1564 (size=13255) 2024-11-23T03:23:10,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332250828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,834 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/086ad1d0e3874d9a8134ae63ea419b08 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/086ad1d0e3874d9a8134ae63ea419b08 2024-11-23T03:23:10,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332250830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65f75e9fffbe4979857c36310b5d466d is 50, key is test_row_0/B:col10/1732332190759/Put/seqid=0 2024-11-23T03:23:10,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332250831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,855 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 086ad1d0e3874d9a8134ae63ea419b08(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:10,855 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:10,855 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332190674; duration=0sec 2024-11-23T03:23:10,855 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:10,855 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:10,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742389_1565 (size=12301) 2024-11-23T03:23:10,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:10,938 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332250933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:10,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:10,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:10,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:10,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:10,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332250945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:10,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:10,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332250949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,091 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:11,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:11,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332251141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332251152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332251158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:11,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:11,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:11,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65f75e9fffbe4979857c36310b5d466d 2024-11-23T03:23:11,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/baa5743e6af9491395150aac19e6067a is 50, key is test_row_0/C:col10/1732332190759/Put/seqid=0 2024-11-23T03:23:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742390_1566 (size=12301) 2024-11-23T03:23:11,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:11,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:11,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332251445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332251460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332251463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,555 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:11,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:11,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,708 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:11,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:11,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:11,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/baa5743e6af9491395150aac19e6067a 2024-11-23T03:23:11,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/fedc6c895e90403490b66861465e760a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a 2024-11-23T03:23:11,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a, entries=300, sequenceid=411, filesize=19.2 K 2024-11-23T03:23:11,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/65f75e9fffbe4979857c36310b5d466d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d 2024-11-23T03:23:11,780 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d, entries=150, sequenceid=411, filesize=12.0 K 2024-11-23T03:23:11,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/baa5743e6af9491395150aac19e6067a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a 2024-11-23T03:23:11,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a, entries=150, sequenceid=411, filesize=12.0 K 2024-11-23T03:23:11,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1031ms, sequenceid=411, compaction requested=false 2024-11-23T03:23:11,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:11,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T03:23:11,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:11,863 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:11,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:11,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:11,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:11,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:11,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:11,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:11,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/13cacbe335a14e6b88ab5b8087bc7e9d is 50, key is test_row_0/A:col10/1732332190824/Put/seqid=0 2024-11-23T03:23:11,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742391_1567 (size=12301) 2024-11-23T03:23:11,920 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/13cacbe335a14e6b88ab5b8087bc7e9d 2024-11-23T03:23:11,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bd7029abec9f4ebd81533bd09cb8ce8f is 50, key is test_row_0/B:col10/1732332190824/Put/seqid=0 2024-11-23T03:23:11,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:11,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:11,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742392_1568 (size=12301) 2024-11-23T03:23:11,979 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bd7029abec9f4ebd81533bd09cb8ce8f 2024-11-23T03:23:11,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/fcf558a5d7564838b908dc313400d790 is 50, key is test_row_0/C:col10/1732332190824/Put/seqid=0 2024-11-23T03:23:11,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332251986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332251988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:11,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:11,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332251989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742393_1569 (size=12301) 2024-11-23T03:23:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332252095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332252097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332252098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332252303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332252305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332252305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,410 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/fcf558a5d7564838b908dc313400d790 2024-11-23T03:23:12,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/13cacbe335a14e6b88ab5b8087bc7e9d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d 2024-11-23T03:23:12,420 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d, entries=150, sequenceid=436, filesize=12.0 K 2024-11-23T03:23:12,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bd7029abec9f4ebd81533bd09cb8ce8f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f 2024-11-23T03:23:12,429 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f, entries=150, sequenceid=436, filesize=12.0 K 2024-11-23T03:23:12,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/fcf558a5d7564838b908dc313400d790 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790 2024-11-23T03:23:12,444 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790, entries=150, sequenceid=436, filesize=12.0 K 2024-11-23T03:23:12,445 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 6a5f5cf259ce583546a3c6bfa36ac47c in 583ms, sequenceid=436, compaction requested=true 2024-11-23T03:23:12,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:12,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:12,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T03:23:12,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-23T03:23:12,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-23T03:23:12,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8130 sec 2024-11-23T03:23:12,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.8250 sec 2024-11-23T03:23:12,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:23:12,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:12,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:12,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:12,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:12,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:12,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-23T03:23:12,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:12,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/426ce94d51c94a06bea36d68d9953e46 is 50, key is test_row_0/A:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742394_1570 (size=14741) 2024-11-23T03:23:12,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332252665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/426ce94d51c94a06bea36d68d9953e46 2024-11-23T03:23:12,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332252671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332252673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/0c292e2bb63d42ea980f2da5f3816b8e is 50, key is test_row_0/B:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742395_1571 (size=12301) 2024-11-23T03:23:12,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/0c292e2bb63d42ea980f2da5f3816b8e 2024-11-23T03:23:12,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/73808bc22046430982fdc32bd6bb75fd is 50, key is test_row_0/C:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T03:23:12,739 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-23T03:23:12,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:12,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-23T03:23:12,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T03:23:12,742 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:12,743 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:12,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:12,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742396_1572 (size=12301) 2024-11-23T03:23:12,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/73808bc22046430982fdc32bd6bb75fd 2024-11-23T03:23:12,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/426ce94d51c94a06bea36d68d9953e46 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46 2024-11-23T03:23:12,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332252775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332252780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46, entries=200, sequenceid=451, filesize=14.4 K 2024-11-23T03:23:12,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332252784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/0c292e2bb63d42ea980f2da5f3816b8e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e 2024-11-23T03:23:12,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e, entries=150, sequenceid=451, filesize=12.0 K 2024-11-23T03:23:12,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/73808bc22046430982fdc32bd6bb75fd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd 2024-11-23T03:23:12,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd, entries=150, sequenceid=451, filesize=12.0 K 2024-11-23T03:23:12,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6a5f5cf259ce583546a3c6bfa36ac47c in 192ms, sequenceid=451, compaction requested=true 2024-11-23T03:23:12,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:12,801 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:12,802 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:12,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:12,803 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59918 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:12,803 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:12,803 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:12,804 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/4e4ec4b6ce904ff4ae7166ff676db9df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=58.5 K 2024-11-23T03:23:12,804 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:12,804 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e4ec4b6ce904ff4ae7166ff676db9df, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:12,804 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:12,804 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:12,804 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/086ad1d0e3874d9a8134ae63ea419b08, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=49.0 K 2024-11-23T03:23:12,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting fedc6c895e90403490b66861465e760a, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732332190429 2024-11-23T03:23:12,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 086ad1d0e3874d9a8134ae63ea419b08, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:12,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13cacbe335a14e6b88ab5b8087bc7e9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732332190824 2024-11-23T03:23:12,805 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 65f75e9fffbe4979857c36310b5d466d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732332190429 2024-11-23T03:23:12,805 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 426ce94d51c94a06bea36d68d9953e46, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:12,806 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting bd7029abec9f4ebd81533bd09cb8ce8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732332190824 2024-11-23T03:23:12,807 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c292e2bb63d42ea980f2da5f3816b8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:12,832 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#494 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:12,833 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/eeb5a2320ebe40e98c0a08457ba256fa is 50, key is test_row_0/B:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T03:23:12,856 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#495 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:12,856 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/77d8e16f222646508cb9c64804e70133 is 50, key is test_row_0/A:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,895 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:12,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T03:23:12,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:12,895 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:12,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742397_1573 (size=13391) 2024-11-23T03:23:12,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8a53a66305744ca9bc78db9d6b204fa8 is 50, key is test_row_0/A:col10/1732332192671/Put/seqid=0 2024-11-23T03:23:12,912 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/eeb5a2320ebe40e98c0a08457ba256fa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/eeb5a2320ebe40e98c0a08457ba256fa 2024-11-23T03:23:12,918 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into eeb5a2320ebe40e98c0a08457ba256fa(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:12,918 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:12,918 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=12, startTime=1732332192802; duration=0sec 2024-11-23T03:23:12,918 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:12,918 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:12,918 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:12,919 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:12,919 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:12,920 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:12,920 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a0a3f0c3cd59452fb72b96affef4ec9f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=49.0 K 2024-11-23T03:23:12,920 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a0a3f0c3cd59452fb72b96affef4ec9f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732332190295 2024-11-23T03:23:12,920 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting baa5743e6af9491395150aac19e6067a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732332190429 2024-11-23T03:23:12,921 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fcf558a5d7564838b908dc313400d790, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=436, earliestPutTs=1732332190824 2024-11-23T03:23:12,922 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 73808bc22046430982fdc32bd6bb75fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:12,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742398_1574 (size=13391) 2024-11-23T03:23:12,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742399_1575 (size=12301) 2024-11-23T03:23:12,955 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8a53a66305744ca9bc78db9d6b204fa8 2024-11-23T03:23:12,967 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#497 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:12,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/ff6a8d8645364ea58e8422d34f019cb3 is 50, key is test_row_0/B:col10/1732332192671/Put/seqid=0 2024-11-23T03:23:12,973 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/b837fdcadd3c472095c3ba8b6982308d is 50, key is test_row_0/C:col10/1732332191973/Put/seqid=0 2024-11-23T03:23:12,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:12,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:12,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742400_1576 (size=13391) 2024-11-23T03:23:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742401_1577 (size=12301) 2024-11-23T03:23:12,995 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/b837fdcadd3c472095c3ba8b6982308d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/b837fdcadd3c472095c3ba8b6982308d 2024-11-23T03:23:12,996 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/ff6a8d8645364ea58e8422d34f019cb3 2024-11-23T03:23:13,002 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into b837fdcadd3c472095c3ba8b6982308d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:13,002 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:13,002 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=12, startTime=1732332192802; duration=0sec 2024-11-23T03:23:13,002 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:13,002 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:13,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/8b08e3652bc247ab982336f41b743e61 is 50, key is test_row_0/C:col10/1732332192671/Put/seqid=0 2024-11-23T03:23:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T03:23:13,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742402_1578 (size=12301) 2024-11-23T03:23:13,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332253063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332253063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332253064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332253177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332253178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332253178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,335 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/77d8e16f222646508cb9c64804e70133 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/77d8e16f222646508cb9c64804e70133 2024-11-23T03:23:13,339 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 77d8e16f222646508cb9c64804e70133(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:13,339 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:13,339 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=12, startTime=1732332192801; duration=0sec 2024-11-23T03:23:13,339 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:13,339 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T03:23:13,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332253381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332253382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332253382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,445 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/8b08e3652bc247ab982336f41b743e61 2024-11-23T03:23:13,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8a53a66305744ca9bc78db9d6b204fa8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8 2024-11-23T03:23:13,454 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:23:13,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/ff6a8d8645364ea58e8422d34f019cb3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3 2024-11-23T03:23:13,460 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:23:13,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/8b08e3652bc247ab982336f41b743e61 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61 2024-11-23T03:23:13,472 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61, entries=150, sequenceid=472, filesize=12.0 K 2024-11-23T03:23:13,473 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 6a5f5cf259ce583546a3c6bfa36ac47c in 578ms, sequenceid=472, compaction requested=false 2024-11-23T03:23:13,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:13,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:13,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T03:23:13,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-23T03:23:13,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-23T03:23:13,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 731 msec 2024-11-23T03:23:13,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 735 msec 2024-11-23T03:23:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:13,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:13,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d8260c65b0c0497fabf160435db6316b is 50, key is test_row_0/A:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742403_1579 (size=14741) 2024-11-23T03:23:13,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332253730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332253731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332253731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332253838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332253839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332253840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T03:23:13,846 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-23T03:23:13,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-23T03:23:13,848 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:13,849 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T03:23:13,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=142 2024-11-23T03:23:14,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332254042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332254042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332254043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d8260c65b0c0497fabf160435db6316b 2024-11-23T03:23:14,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6fe35a360eca45b892f7193bd19558ca is 50, key is test_row_0/B:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:14,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742404_1580 (size=12301) 2024-11-23T03:23:14,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T03:23:14,153 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
as already flushing 2024-11-23T03:23:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332254348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332254348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332254349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T03:23:14,458 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6fe35a360eca45b892f7193bd19558ca 2024-11-23T03:23:14,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/89f7e382662a4598a175dd7ece23a39c is 50, key is test_row_0/C:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:14,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742405_1581 (size=12301) 2024-11-23T03:23:14,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332254853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332254854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:14,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332254855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:14,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:14,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:14,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:14,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/89f7e382662a4598a175dd7ece23a39c 2024-11-23T03:23:14,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/d8260c65b0c0497fabf160435db6316b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b 2024-11-23T03:23:14,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b, entries=200, sequenceid=493, filesize=14.4 K 2024-11-23T03:23:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6fe35a360eca45b892f7193bd19558ca as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca 2024-11-23T03:23:14,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca, entries=150, sequenceid=493, filesize=12.0 K 2024-11-23T03:23:14,942 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-23T03:23:14,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/89f7e382662a4598a175dd7ece23a39c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c 2024-11-23T03:23:14,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c, entries=150, sequenceid=493, filesize=12.0 K 2024-11-23T03:23:14,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1256ms, sequenceid=493, compaction requested=true 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:14,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:14,947 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:14,948 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,948 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:14,948 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/eeb5a2320ebe40e98c0a08457ba256fa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.1 K 2024-11-23T03:23:14,948 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/77d8e16f222646508cb9c64804e70133, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=39.5 K 2024-11-23T03:23:14,948 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting eeb5a2320ebe40e98c0a08457ba256fa, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:14,948 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77d8e16f222646508cb9c64804e70133, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:14,948 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ff6a8d8645364ea58e8422d34f019cb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332192651 2024-11-23T03:23:14,948 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a53a66305744ca9bc78db9d6b204fa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332192651 
2024-11-23T03:23:14,949 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8260c65b0c0497fabf160435db6316b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:14,949 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe35a360eca45b892f7193bd19558ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T03:23:14,956 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:14,956 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#503 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:14,956 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6f5e31a5bb954780b2f00cf59c1c06e2 is 50, key is test_row_0/B:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:14,957 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/03419d62659d4c90bd0c098274ff6780 is 50, key is test_row_0/A:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:14,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742406_1582 (size=13493) 2024-11-23T03:23:14,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742407_1583 (size=13493) 2024-11-23T03:23:15,069 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:15,070 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8136108e69fe4d558d049defe021b963 is 50, key is test_row_0/A:col10/1732332193730/Put/seqid=0 2024-11-23T03:23:15,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742408_1584 (size=12301) 2024-11-23T03:23:15,369 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/6f5e31a5bb954780b2f00cf59c1c06e2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6f5e31a5bb954780b2f00cf59c1c06e2 2024-11-23T03:23:15,371 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/03419d62659d4c90bd0c098274ff6780 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/03419d62659d4c90bd0c098274ff6780 2024-11-23T03:23:15,374 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 6f5e31a5bb954780b2f00cf59c1c06e2(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:15,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:15,374 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332194947; duration=0sec 2024-11-23T03:23:15,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:15,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:15,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:15,375 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 03419d62659d4c90bd0c098274ff6780(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:15,375 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:15,375 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332194947; duration=0sec 2024-11-23T03:23:15,375 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:15,375 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:15,376 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:15,376 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:15,376 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:15,376 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/b837fdcadd3c472095c3ba8b6982308d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.1 K 2024-11-23T03:23:15,377 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting b837fdcadd3c472095c3ba8b6982308d, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732332191973 2024-11-23T03:23:15,377 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b08e3652bc247ab982336f41b743e61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732332192651 2024-11-23T03:23:15,377 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 89f7e382662a4598a175dd7ece23a39c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:15,382 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:15,383 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/66f7352e9e204357af0058ad39701467 is 50, key is test_row_0/C:col10/1732332193021/Put/seqid=0 2024-11-23T03:23:15,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742409_1585 (size=13493) 2024-11-23T03:23:15,390 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/66f7352e9e204357af0058ad39701467 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/66f7352e9e204357af0058ad39701467 2024-11-23T03:23:15,393 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 66f7352e9e204357af0058ad39701467(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:15,393 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:15,394 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332194947; duration=0sec 2024-11-23T03:23:15,394 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:15,394 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:15,479 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8136108e69fe4d558d049defe021b963 2024-11-23T03:23:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/02110fbd03574cf2bee5f756390e2c94 is 50, key is test_row_0/B:col10/1732332193730/Put/seqid=0 2024-11-23T03:23:15,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742410_1586 (size=12301) 2024-11-23T03:23:15,489 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/02110fbd03574cf2bee5f756390e2c94 2024-11-23T03:23:15,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/eedb1d2f41df4c8d8cea4894b3126661 is 50, key is test_row_0/C:col10/1732332193730/Put/seqid=0 2024-11-23T03:23:15,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742411_1587 (size=12301) 2024-11-23T03:23:15,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:15,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:15,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332255829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332255833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45768 deadline: 1732332255862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45772 deadline: 1732332255862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45734 deadline: 1732332255865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,900 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/eedb1d2f41df4c8d8cea4894b3126661 2024-11-23T03:23:15,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/8136108e69fe4d558d049defe021b963 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963 2024-11-23T03:23:15,908 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963, entries=150, sequenceid=512, filesize=12.0 K 2024-11-23T03:23:15,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/02110fbd03574cf2bee5f756390e2c94 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94 2024-11-23T03:23:15,912 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94, entries=150, sequenceid=512, filesize=12.0 K 2024-11-23T03:23:15,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/eedb1d2f41df4c8d8cea4894b3126661 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661 2024-11-23T03:23:15,916 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661, entries=150, sequenceid=512, filesize=12.0 K 2024-11-23T03:23:15,917 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 6a5f5cf259ce583546a3c6bfa36ac47c in 847ms, sequenceid=512, compaction requested=false 2024-11-23T03:23:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-23T03:23:15,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-23T03:23:15,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-23T03:23:15,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0690 sec 2024-11-23T03:23:15,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.0730 sec 2024-11-23T03:23:15,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:15,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T03:23:15,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:15,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec9e55d0aaa74b58820642a81ab4eecf is 50, key is test_row_0/A:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742412_1588 (size=14741) 2024-11-23T03:23:15,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T03:23:15,953 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-23T03:23:15,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:15,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-23T03:23:15,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:15,956 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:15,957 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:15,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:15,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332255986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:15,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:15,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332255987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:16,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332256092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332256092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:16,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:16,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332256299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332256299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec9e55d0aaa74b58820642a81ab4eecf 2024-11-23T03:23:16,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/22328963720e46fe82ad23d43d7f3e83 is 50, key is test_row_0/B:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:16,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742413_1589 (size=12301) 2024-11-23T03:23:16,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:16,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,429 DEBUG [Thread-2130 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:61411 2024-11-23T03:23:16,429 DEBUG [Thread-2130 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:16,429 DEBUG [Thread-2134 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:61411 2024-11-23T03:23:16,430 DEBUG [Thread-2134 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:16,431 DEBUG [Thread-2136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:61411 2024-11-23T03:23:16,431 DEBUG [Thread-2136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:16,433 DEBUG [Thread-2128 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34b30c39 to 127.0.0.1:61411 2024-11-23T03:23:16,433 DEBUG [Thread-2128 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:16,434 DEBUG [Thread-2132 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:61411 2024-11-23T03:23:16,434 DEBUG [Thread-2132 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:16,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:16,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:16,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332256605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332256605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:16,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:16,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:16,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/22328963720e46fe82ad23d43d7f3e83 2024-11-23T03:23:16,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/50d4055151d743fdb140a50abaebe880 is 50, key is test_row_0/C:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:16,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742414_1590 (size=12301) 2024-11-23T03:23:16,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:16,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:16,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:16,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:17,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:17,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:17,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:17,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:17,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:17,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:17,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:17,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:17,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:17,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45718 deadline: 1732332257106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:17,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45738 deadline: 1732332257109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:17,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/50d4055151d743fdb140a50abaebe880 2024-11-23T03:23:17,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/ec9e55d0aaa74b58820642a81ab4eecf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf 2024-11-23T03:23:17,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf, entries=200, sequenceid=533, filesize=14.4 K 2024-11-23T03:23:17,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/22328963720e46fe82ad23d43d7f3e83 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83 2024-11-23T03:23:17,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83, entries=150, sequenceid=533, filesize=12.0 K 2024-11-23T03:23:17,176 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:17,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:17,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/50d4055151d743fdb140a50abaebe880 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880 2024-11-23T03:23:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:17,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:17,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880, entries=150, sequenceid=533, filesize=12.0 K 2024-11-23T03:23:17,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1241ms, sequenceid=533, compaction requested=true 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:17,179 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a5f5cf259ce583546a3c6bfa36ac47c:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:17,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/A is initiating minor compaction (all files) 2024-11-23T03:23:17,180 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/A in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/B is initiating minor compaction (all files) 2024-11-23T03:23:17,180 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/03419d62659d4c90bd0c098274ff6780, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=39.6 K 2024-11-23T03:23:17,180 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/B in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:17,180 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6f5e31a5bb954780b2f00cf59c1c06e2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.2 K 2024-11-23T03:23:17,180 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03419d62659d4c90bd0c098274ff6780, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:17,181 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f5e31a5bb954780b2f00cf59c1c06e2, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:17,181 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8136108e69fe4d558d049defe021b963, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1732332193702 2024-11-23T03:23:17,181 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 02110fbd03574cf2bee5f756390e2c94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1732332193702 2024-11-23T03:23:17,181 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec9e55d0aaa74b58820642a81ab4eecf, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732332195818 2024-11-23T03:23:17,181 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 22328963720e46fe82ad23d43d7f3e83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732332195818 2024-11-23T03:23:17,186 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#B#compaction#512 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:17,186 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3c597ee96efa4002a574cc1e330a5576 is 50, key is test_row_0/B:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:17,187 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#A#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:17,187 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/45208307604a47b9a781da1778a2a8a4 is 50, key is test_row_0/A:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:17,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742415_1591 (size=13595) 2024-11-23T03:23:17,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742416_1592 (size=13595) 2024-11-23T03:23:17,328 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:17,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T03:23:17,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:17,329 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:17,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:17,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/630a0d553c8d493db09c58906653c92c is 50, key is test_row_0/A:col10/1732332195965/Put/seqid=0 2024-11-23T03:23:17,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742417_1593 (size=12301) 2024-11-23T03:23:17,593 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/3c597ee96efa4002a574cc1e330a5576 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3c597ee96efa4002a574cc1e330a5576 2024-11-23T03:23:17,594 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/45208307604a47b9a781da1778a2a8a4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/45208307604a47b9a781da1778a2a8a4 2024-11-23T03:23:17,597 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/B of 6a5f5cf259ce583546a3c6bfa36ac47c into 3c597ee96efa4002a574cc1e330a5576(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:17,597 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/A of 6a5f5cf259ce583546a3c6bfa36ac47c into 45208307604a47b9a781da1778a2a8a4(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:17,597 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/B, priority=13, startTime=1732332197179; duration=0sec 2024-11-23T03:23:17,597 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/A, priority=13, startTime=1732332197179; duration=0sec 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:A 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:17,597 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:B 2024-11-23T03:23:17,598 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:17,598 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 6a5f5cf259ce583546a3c6bfa36ac47c/C is initiating minor compaction (all files) 2024-11-23T03:23:17,598 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a5f5cf259ce583546a3c6bfa36ac47c/C in TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:17,598 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/66f7352e9e204357af0058ad39701467, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp, totalSize=37.2 K 2024-11-23T03:23:17,598 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66f7352e9e204357af0058ad39701467, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=493, earliestPutTs=1732332193021 2024-11-23T03:23:17,598 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting eedb1d2f41df4c8d8cea4894b3126661, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=512, earliestPutTs=1732332193702 2024-11-23T03:23:17,599 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50d4055151d743fdb140a50abaebe880, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732332195818 2024-11-23T03:23:17,603 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a5f5cf259ce583546a3c6bfa36ac47c#C#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:17,603 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/1fe0b0ccda38433bae20df013b7353cc is 50, key is test_row_0/C:col10/1732332195828/Put/seqid=0 2024-11-23T03:23:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742418_1594 (size=13595) 2024-11-23T03:23:17,738 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/630a0d553c8d493db09c58906653c92c 2024-11-23T03:23:17,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/f2462dafa5a8491380fe6f1ad2deae01 is 50, key is test_row_0/B:col10/1732332195965/Put/seqid=0 2024-11-23T03:23:17,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742419_1595 (size=12301) 2024-11-23T03:23:17,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:17,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. as already flushing 2024-11-23T03:23:17,875 DEBUG [Thread-2117 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:61411 2024-11-23T03:23:17,875 DEBUG [Thread-2117 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:17,885 DEBUG [Thread-2125 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:61411 2024-11-23T03:23:17,885 DEBUG [Thread-2125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:17,886 DEBUG [Thread-2119 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:61411 2024-11-23T03:23:17,886 DEBUG [Thread-2119 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:18,010 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/1fe0b0ccda38433bae20df013b7353cc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/1fe0b0ccda38433bae20df013b7353cc 2024-11-23T03:23:18,013 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a5f5cf259ce583546a3c6bfa36ac47c/C of 6a5f5cf259ce583546a3c6bfa36ac47c into 1fe0b0ccda38433bae20df013b7353cc(size=13.3 K), total size for store is 13.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:18,013 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:18,013 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c., storeName=6a5f5cf259ce583546a3c6bfa36ac47c/C, priority=13, startTime=1732332197179; duration=0sec 2024-11-23T03:23:18,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:18,014 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a5f5cf259ce583546a3c6bfa36ac47c:C 2024-11-23T03:23:18,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:18,110 DEBUG [Thread-2123 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:61411 2024-11-23T03:23:18,110 DEBUG [Thread-2123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:18,120 DEBUG [Thread-2121 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:61411 2024-11-23T03:23:18,120 DEBUG [Thread-2121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:18,146 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/f2462dafa5a8491380fe6f1ad2deae01 2024-11-23T03:23:18,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/40dfcd4970f8499d8a3f0757f81b9a70 is 50, key is test_row_0/C:col10/1732332195965/Put/seqid=0 2024-11-23T03:23:18,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742420_1596 (size=12301) 2024-11-23T03:23:18,555 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/40dfcd4970f8499d8a3f0757f81b9a70 2024-11-23T03:23:18,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/630a0d553c8d493db09c58906653c92c as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/630a0d553c8d493db09c58906653c92c 2024-11-23T03:23:18,560 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/630a0d553c8d493db09c58906653c92c, entries=150, sequenceid=551, filesize=12.0 K 2024-11-23T03:23:18,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/f2462dafa5a8491380fe6f1ad2deae01 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/f2462dafa5a8491380fe6f1ad2deae01 2024-11-23T03:23:18,563 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/f2462dafa5a8491380fe6f1ad2deae01, entries=150, sequenceid=551, filesize=12.0 K 2024-11-23T03:23:18,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/40dfcd4970f8499d8a3f0757f81b9a70 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/40dfcd4970f8499d8a3f0757f81b9a70 2024-11-23T03:23:18,566 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/40dfcd4970f8499d8a3f0757f81b9a70, entries=150, sequenceid=551, filesize=12.0 K 2024-11-23T03:23:18,566 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=33.54 KB/34350 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1237ms, sequenceid=551, compaction requested=false 2024-11-23T03:23:18,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:18,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 
2024-11-23T03:23:18,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-23T03:23:18,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-23T03:23:18,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-23T03:23:18,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6100 sec 2024-11-23T03:23:18,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 2.6140 sec 2024-11-23T03:23:20,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T03:23:20,061 INFO [Thread-2127 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 123 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 129 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1846 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5538 rows 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1828 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5484 rows 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1836 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5508 rows 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1844 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5532 rows 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1851 2024-11-23T03:23:20,061 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5553 rows 2024-11-23T03:23:20,061 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:23:20,061 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x238db126 to 127.0.0.1:61411 2024-11-23T03:23:20,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:20,063 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-11-23T03:23:20,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T03:23:20,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:20,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:20,067 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332200066"}]},"ts":"1732332200066"} 2024-11-23T03:23:20,068 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T03:23:20,070 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T03:23:20,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:23:20,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, UNASSIGN}] 2024-11-23T03:23:20,073 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, UNASSIGN 2024-11-23T03:23:20,074 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=6a5f5cf259ce583546a3c6bfa36ac47c, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:20,075 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:23:20,075 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure 6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:23:20,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:20,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:20,227 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing 6a5f5cf259ce583546a3c6bfa36ac47c, disabling compactions & flushes 2024-11-23T03:23:20,227 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. after waiting 0 ms 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:20,227 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(2837): Flushing 6a5f5cf259ce583546a3c6bfa36ac47c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=A 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=B 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a5f5cf259ce583546a3c6bfa36ac47c, store=C 2024-11-23T03:23:20,227 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:20,230 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/b899bf9731004fb49d97240492c2e5b7 is 50, key is test_row_0/A:col10/1732332198119/Put/seqid=0 2024-11-23T03:23:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742421_1597 (size=12301) 2024-11-23T03:23:20,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:20,634 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=562 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/b899bf9731004fb49d97240492c2e5b7 2024-11-23T03:23:20,639 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bb7e65d890a84cbd8c68ccaebfb68ccc is 50, key is test_row_0/B:col10/1732332198119/Put/seqid=0 2024-11-23T03:23:20,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742422_1598 (size=12301) 2024-11-23T03:23:20,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:21,043 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=562 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bb7e65d890a84cbd8c68ccaebfb68ccc 2024-11-23T03:23:21,049 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/de98abb720a04d48a4eaa89b1436175d is 50, key is test_row_0/C:col10/1732332198119/Put/seqid=0 2024-11-23T03:23:21,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742423_1599 (size=12301) 2024-11-23T03:23:21,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:21,453 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=562 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/de98abb720a04d48a4eaa89b1436175d 2024-11-23T03:23:21,456 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/A/b899bf9731004fb49d97240492c2e5b7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b899bf9731004fb49d97240492c2e5b7 2024-11-23T03:23:21,459 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b899bf9731004fb49d97240492c2e5b7, entries=150, sequenceid=562, filesize=12.0 K 2024-11-23T03:23:21,460 DEBUG 
[RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/B/bb7e65d890a84cbd8c68ccaebfb68ccc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bb7e65d890a84cbd8c68ccaebfb68ccc 2024-11-23T03:23:21,462 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bb7e65d890a84cbd8c68ccaebfb68ccc, entries=150, sequenceid=562, filesize=12.0 K 2024-11-23T03:23:21,463 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/.tmp/C/de98abb720a04d48a4eaa89b1436175d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/de98abb720a04d48a4eaa89b1436175d 2024-11-23T03:23:21,466 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/de98abb720a04d48a4eaa89b1436175d, entries=150, sequenceid=562, filesize=12.0 K 2024-11-23T03:23:21,467 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 6a5f5cf259ce583546a3c6bfa36ac47c in 1239ms, sequenceid=562, compaction requested=true 2024-11-23T03:23:21,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/cde7911cb60b4f8491f59809e71b7285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/314c4034a4c14b0a97656a08bd4ca6f7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b3422d4e4c2540958e6ff25b169e98d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/017ded80f3ef4523a15a4c73804c5112, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/a8fdd84621d240839e6cd1a03ca4fd68, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d5a8f74774b647ed857e2f9a5a738285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f9ecb3da1fb44e4b16dcc946e15dfd1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/84fe6d243e924bad83ac837262c61c04, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/4e4ec4b6ce904ff4ae7166ff676db9df, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/77d8e16f222646508cb9c64804e70133, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/03419d62659d4c90bd0c098274ff6780, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf] to archive 2024-11-23T03:23:21,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
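The HFileArchiver entries that follow show each compacted store file being moved from the region's data directory to a mirrored path under the cluster-level archive directory. Below is a minimal, illustrative Java sketch of that path mapping, assuming plain string handling over the paths seen in these entries; it is not HBase's actual HFileArchiver code, and the class and method names are hypothetical.

// Illustrative sketch only (not HBase's HFileArchiver): reproduces the path mapping
// visible in the log entries above, where a store file under
//   <root>/data/<namespace>/<table>/<region>/<family>/<file>
// is archived to
//   <root>/archive/data/<namespace>/<table>/<region>/<family>/<file>
public final class ArchivePathSketch {

    static String toArchivePath(String rootDir, String storeFilePath) {
        // Keep everything after the root dir, e.g. "/data/default/TestAcidGuarantees/..."
        String relative = storeFilePath.substring(rootDir.length());
        return rootDir + "/archive" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417";
        String storeFile = root + "/data/default/TestAcidGuarantees/"
                + "6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7";
        // Prints the same archive destination reported by backup.HFileArchiver(596) below.
        System.out.println(toArchivePath(root, storeFile));
    }
}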
2024-11-23T03:23:21,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e91e1dbd756745249c6f19108caa72d7 2024-11-23T03:23:21,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/97f2a4b3647a4ec893d15683b544fdcb 2024-11-23T03:23:21,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2bfe0347aa1343618cb8dd06cbf96978 2024-11-23T03:23:21,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/cde7911cb60b4f8491f59809e71b7285 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/cde7911cb60b4f8491f59809e71b7285 2024-11-23T03:23:21,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1eee56b2fa144184b04b1586410b5242 2024-11-23T03:23:21,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/3d1bcb44daa34d1294bd043a755fd217 2024-11-23T03:23:21,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/314c4034a4c14b0a97656a08bd4ca6f7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/314c4034a4c14b0a97656a08bd4ca6f7 2024-11-23T03:23:21,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/5da52673950c4ddbb2ad59c226f1f7be 2024-11-23T03:23:21,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/528672d0365d4506891fd42afbb92942 2024-11-23T03:23:21,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b3422d4e4c2540958e6ff25b169e98d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b3422d4e4c2540958e6ff25b169e98d7 2024-11-23T03:23:21,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/66da570d51724520890fa0be90a0afcb 2024-11-23T03:23:21,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec612e5966e84d7e867f18acee6c9fc7 2024-11-23T03:23:21,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f0ac177ae3f4bc9a0f9db4af35070af 2024-11-23T03:23:21,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/017ded80f3ef4523a15a4c73804c5112 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/017ded80f3ef4523a15a4c73804c5112 2024-11-23T03:23:21,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/c6d5f835b02f4d158d1a71a4f6d61269 2024-11-23T03:23:21,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/1f7fa71a890d4e10bd0f4d9b928bfe90 2024-11-23T03:23:21,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/a8fdd84621d240839e6cd1a03ca4fd68 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/a8fdd84621d240839e6cd1a03ca4fd68 2024-11-23T03:23:21,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/2c46abb20396412b88fb77b8bc1356e9 2024-11-23T03:23:21,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d5a8f74774b647ed857e2f9a5a738285 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d5a8f74774b647ed857e2f9a5a738285 2024-11-23T03:23:21,487 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fe5d07ab92804a3bafb0cbd116bd6c52 2024-11-23T03:23:21,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ce20ad56aba94afbbd617fdd80005004 2024-11-23T03:23:21,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8824f3cf1bda4ee1a25cb1fa2ed8806b 2024-11-23T03:23:21,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f9ecb3da1fb44e4b16dcc946e15dfd1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/9f9ecb3da1fb44e4b16dcc946e15dfd1 2024-11-23T03:23:21,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/86d4c05f004b49cba5a048993c5923d6 2024-11-23T03:23:21,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/e97c6a31469547c5bb516013c4259aa0 2024-11-23T03:23:21,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/84fe6d243e924bad83ac837262c61c04 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/84fe6d243e924bad83ac837262c61c04 2024-11-23T03:23:21,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d6c98aad94114dddaf846da79555c1c6 2024-11-23T03:23:21,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fb7ae969d6c34ab2bc2fb7993d8cd901 2024-11-23T03:23:21,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/4e4ec4b6ce904ff4ae7166ff676db9df to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/4e4ec4b6ce904ff4ae7166ff676db9df 2024-11-23T03:23:21,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/fedc6c895e90403490b66861465e760a 2024-11-23T03:23:21,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/13cacbe335a14e6b88ab5b8087bc7e9d 2024-11-23T03:23:21,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/426ce94d51c94a06bea36d68d9953e46 2024-11-23T03:23:21,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/77d8e16f222646508cb9c64804e70133 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/77d8e16f222646508cb9c64804e70133 2024-11-23T03:23:21,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8a53a66305744ca9bc78db9d6b204fa8 2024-11-23T03:23:21,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/d8260c65b0c0497fabf160435db6316b 2024-11-23T03:23:21,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/03419d62659d4c90bd0c098274ff6780 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/03419d62659d4c90bd0c098274ff6780 2024-11-23T03:23:21,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/8136108e69fe4d558d049defe021b963 2024-11-23T03:23:21,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/ec9e55d0aaa74b58820642a81ab4eecf 2024-11-23T03:23:21,506 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/32176c09b4224e908e64458f9e81780a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cf60454ee79e4ba7a1693afa56ff8d8d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/06397bcc81bb4338877f79d58409396e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/97c3e4c42a874195aa6ef6a54f406453, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/df2123923e1a46f69e28ec7cbe9c831e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/944c395f1def4949abf0efb164f82b9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e28a2978cf843fb91fd0154fe919617, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ce98949cf763421995a47487d1c2b946, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/086ad1d0e3874d9a8134ae63ea419b08, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/eeb5a2320ebe40e98c0a08457ba256fa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6f5e31a5bb954780b2f00cf59c1c06e2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83] to archive 2024-11-23T03:23:21,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:23:21,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b92d84138f974d66aa3b7732660798f5 2024-11-23T03:23:21,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/72c5bf153ad643ea8bc721ac98a08f37 2024-11-23T03:23:21,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/32176c09b4224e908e64458f9e81780a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/32176c09b4224e908e64458f9e81780a 2024-11-23T03:23:21,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/78ae098ed51b4b319160bf253f13a1a8 2024-11-23T03:23:21,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/818f055a9051476a8461d0805b755472 2024-11-23T03:23:21,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cf60454ee79e4ba7a1693afa56ff8d8d to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cf60454ee79e4ba7a1693afa56ff8d8d 2024-11-23T03:23:21,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65be7c0150514d378c8b66c3de5a30ba 2024-11-23T03:23:21,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/49180ed55ac3471e8491adcf3097f38b 2024-11-23T03:23:21,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/06397bcc81bb4338877f79d58409396e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/06397bcc81bb4338877f79d58409396e 2024-11-23T03:23:21,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/b22f4befde6f43cbbebadea3ae9a73cf 2024-11-23T03:23:21,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a4153b374a7441e39b23d290f550f6ab 2024-11-23T03:23:21,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3258694ee8b848a7bccf3bb226235336 2024-11-23T03:23:21,527 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/97c3e4c42a874195aa6ef6a54f406453 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/97c3e4c42a874195aa6ef6a54f406453 2024-11-23T03:23:21,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/d64d98a9bda241e1b8ad02b3fbe0664c 2024-11-23T03:23:21,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/88ec852c218748b6b06b48281dbb384e 2024-11-23T03:23:21,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/df2123923e1a46f69e28ec7cbe9c831e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/df2123923e1a46f69e28ec7cbe9c831e 2024-11-23T03:23:21,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e6f9abe4dd84a63bd81e51f7924b837 2024-11-23T03:23:21,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/e13b801bfc844d1aadc6f2f6315a2cd3 2024-11-23T03:23:21,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/944c395f1def4949abf0efb164f82b9d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/944c395f1def4949abf0efb164f82b9d 2024-11-23T03:23:21,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/535db1960b554f2390558ae3147fe054 2024-11-23T03:23:21,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/a9453d8de9e240968211c31680e8c642 2024-11-23T03:23:21,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e28a2978cf843fb91fd0154fe919617 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3e28a2978cf843fb91fd0154fe919617 2024-11-23T03:23:21,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/006aec99cdce4483ae0f9c2232713e05 2024-11-23T03:23:21,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6341df4adef349ff8af1f5b6ab68e936 2024-11-23T03:23:21,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ce98949cf763421995a47487d1c2b946 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ce98949cf763421995a47487d1c2b946 2024-11-23T03:23:21,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/43c3a73c89354394b2699439305b1c1f 2024-11-23T03:23:21,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/cd4bc5bcedb24483a6253f671f0cc476 2024-11-23T03:23:21,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/086ad1d0e3874d9a8134ae63ea419b08 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/086ad1d0e3874d9a8134ae63ea419b08 2024-11-23T03:23:21,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/17c609c531ac402f9f007a68d1bfcd82 2024-11-23T03:23:21,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/65f75e9fffbe4979857c36310b5d466d 2024-11-23T03:23:21,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bd7029abec9f4ebd81533bd09cb8ce8f 2024-11-23T03:23:21,556 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/eeb5a2320ebe40e98c0a08457ba256fa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/eeb5a2320ebe40e98c0a08457ba256fa 2024-11-23T03:23:21,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/0c292e2bb63d42ea980f2da5f3816b8e 2024-11-23T03:23:21,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/ff6a8d8645364ea58e8422d34f019cb3 2024-11-23T03:23:21,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6f5e31a5bb954780b2f00cf59c1c06e2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6f5e31a5bb954780b2f00cf59c1c06e2 2024-11-23T03:23:21,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/6fe35a360eca45b892f7193bd19558ca 2024-11-23T03:23:21,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/02110fbd03574cf2bee5f756390e2c94 2024-11-23T03:23:21,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/22328963720e46fe82ad23d43d7f3e83 2024-11-23T03:23:21,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e7337271593d4df2a5ce1449a46db554, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7629c27b27484e7b8df440e08ea523ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/9075986907d04777861cc248b221d272, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/18ebbbe4e16442a0bbf75b5b392db4b2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/0c706ced1a1443a89ad81c572bdd52fc, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/05bfaba224c14e8d86daf94353139d86, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/da29af4faea248ca878dff5a32f3b14a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/d840d6d5800e47ee866aab04f702ef7d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a0a3f0c3cd59452fb72b96affef4ec9f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/b837fdcadd3c472095c3ba8b6982308d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/66f7352e9e204357af0058ad39701467, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880] to archive 2024-11-23T03:23:21,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:23:21,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/3a17a79cef044612ad15ba9e1031c2d3 2024-11-23T03:23:21,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bc14e4d43ce044038cde82928ff3c6d3 2024-11-23T03:23:21,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e7337271593d4df2a5ce1449a46db554 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e7337271593d4df2a5ce1449a46db554 2024-11-23T03:23:21,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f2e5830695b24c88bb50cf8825d3fab2 2024-11-23T03:23:21,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/46890ed0d05c421a9e00397a3a92ddf8 2024-11-23T03:23:21,593 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7629c27b27484e7b8df440e08ea523ac to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7629c27b27484e7b8df440e08ea523ac 2024-11-23T03:23:21,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f6e19c6790814b79b747e07d8652d1f6 2024-11-23T03:23:21,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/4d106749dcbc4cf3a8aac99d14e90d14 2024-11-23T03:23:21,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/9075986907d04777861cc248b221d272 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/9075986907d04777861cc248b221d272 2024-11-23T03:23:21,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/97c45b5a6dbc4277acc4c135d2b890e8 2024-11-23T03:23:21,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c54d99310cb447a5bc842f52b525275d 2024-11-23T03:23:21,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a7a0eede18b84b898877a6a512a0b060 2024-11-23T03:23:21,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/18ebbbe4e16442a0bbf75b5b392db4b2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/18ebbbe4e16442a0bbf75b5b392db4b2 2024-11-23T03:23:21,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/41b63e648417486ab18a4c5d319239aa 2024-11-23T03:23:21,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/2a2f3f19b21049aca85a7d6edd56af89 2024-11-23T03:23:21,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/0c706ced1a1443a89ad81c572bdd52fc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/0c706ced1a1443a89ad81c572bdd52fc 2024-11-23T03:23:21,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/e8bb6686a45a4415a3f2a2e6652abde1 2024-11-23T03:23:21,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c30ac18ac5954c65be0313f8180e326d 2024-11-23T03:23:21,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/05bfaba224c14e8d86daf94353139d86 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/05bfaba224c14e8d86daf94353139d86 2024-11-23T03:23:21,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/96442fd556f84b428bfafe5024eaba78 2024-11-23T03:23:21,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/91a00fb6369c4afe9fb52a009b3fa901 2024-11-23T03:23:21,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/da29af4faea248ca878dff5a32f3b14a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/da29af4faea248ca878dff5a32f3b14a 2024-11-23T03:23:21,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/c8c10b9e583648499a18be08d1977b46 2024-11-23T03:23:21,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/f32f6be15c594023b68ef0b8585f3054 2024-11-23T03:23:21,629 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/d840d6d5800e47ee866aab04f702ef7d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/d840d6d5800e47ee866aab04f702ef7d 2024-11-23T03:23:21,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/80da2f6cf3724f7c84745ae961e7d64a 2024-11-23T03:23:21,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/7d194bd6228e4ad68a4c6beb36d16fef 2024-11-23T03:23:21,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a0a3f0c3cd59452fb72b96affef4ec9f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/a0a3f0c3cd59452fb72b96affef4ec9f 2024-11-23T03:23:21,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/bba28ec2c6bd47d9bc9b29476924e6e9 2024-11-23T03:23:21,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/baa5743e6af9491395150aac19e6067a 2024-11-23T03:23:21,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/fcf558a5d7564838b908dc313400d790 2024-11-23T03:23:21,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/b837fdcadd3c472095c3ba8b6982308d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/b837fdcadd3c472095c3ba8b6982308d 2024-11-23T03:23:21,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/73808bc22046430982fdc32bd6bb75fd 2024-11-23T03:23:21,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/8b08e3652bc247ab982336f41b743e61 2024-11-23T03:23:21,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/66f7352e9e204357af0058ad39701467 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/66f7352e9e204357af0058ad39701467 2024-11-23T03:23:21,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/89f7e382662a4598a175dd7ece23a39c 2024-11-23T03:23:21,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/eedb1d2f41df4c8d8cea4894b3126661 2024-11-23T03:23:21,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/50d4055151d743fdb140a50abaebe880 2024-11-23T03:23:21,664 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/recovered.edits/565.seqid, newMaxSeqId=565, maxSeqId=1 2024-11-23T03:23:21,664 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c. 2024-11-23T03:23:21,664 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for 6a5f5cf259ce583546a3c6bfa36ac47c: 2024-11-23T03:23:21,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=6a5f5cf259ce583546a3c6bfa36ac47c, regionState=CLOSED 2024-11-23T03:23:21,666 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed 6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:21,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-23T03:23:21,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure 6a5f5cf259ce583546a3c6bfa36ac47c, server=0d51875c74df,34141,1732332039937 in 1.5920 sec 2024-11-23T03:23:21,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-23T03:23:21,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a5f5cf259ce583546a3c6bfa36ac47c, UNASSIGN in 1.5970 sec 2024-11-23T03:23:21,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-23T03:23:21,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6010 sec 2024-11-23T03:23:21,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332201673"}]},"ts":"1732332201673"} 2024-11-23T03:23:21,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:23:21,677 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:23:21,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=146, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6130 sec 2024-11-23T03:23:22,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T03:23:22,170 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-23T03:23:22,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:23:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,172 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-23T03:23:22,173 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=150, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,175 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:22,177 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/recovered.edits] 2024-11-23T03:23:22,179 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/45208307604a47b9a781da1778a2a8a4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/45208307604a47b9a781da1778a2a8a4 2024-11-23T03:23:22,180 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/630a0d553c8d493db09c58906653c92c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/630a0d553c8d493db09c58906653c92c 2024-11-23T03:23:22,182 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b899bf9731004fb49d97240492c2e5b7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/A/b899bf9731004fb49d97240492c2e5b7 2024-11-23T03:23:22,184 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3c597ee96efa4002a574cc1e330a5576 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/3c597ee96efa4002a574cc1e330a5576 2024-11-23T03:23:22,185 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bb7e65d890a84cbd8c68ccaebfb68ccc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/bb7e65d890a84cbd8c68ccaebfb68ccc 2024-11-23T03:23:22,186 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/f2462dafa5a8491380fe6f1ad2deae01 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/B/f2462dafa5a8491380fe6f1ad2deae01 2024-11-23T03:23:22,188 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/1fe0b0ccda38433bae20df013b7353cc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/1fe0b0ccda38433bae20df013b7353cc 2024-11-23T03:23:22,189 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/40dfcd4970f8499d8a3f0757f81b9a70 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/40dfcd4970f8499d8a3f0757f81b9a70 2024-11-23T03:23:22,190 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/de98abb720a04d48a4eaa89b1436175d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/C/de98abb720a04d48a4eaa89b1436175d 2024-11-23T03:23:22,192 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/recovered.edits/565.seqid 
to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c/recovered.edits/565.seqid 2024-11-23T03:23:22,193 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/6a5f5cf259ce583546a3c6bfa36ac47c 2024-11-23T03:23:22,193 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:23:22,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=150, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,199 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:23:22,201 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T03:23:22,201 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=150, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,201 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:23:22,201 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332202201"}]},"ts":"9223372036854775807"} 2024-11-23T03:23:22,203 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:23:22,203 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6a5f5cf259ce583546a3c6bfa36ac47c, NAME => 'TestAcidGuarantees,,1732332174140.6a5f5cf259ce583546a3c6bfa36ac47c.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:23:22,203 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T03:23:22,203 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332202203"}]},"ts":"9223372036854775807"} 2024-11-23T03:23:22,205 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:23:22,208 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=150, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 37 msec 2024-11-23T03:23:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-23T03:23:22,274 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-23T03:23:22,285 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=240 (was 238) - Thread LEAK? -, OpenFileDescriptor=451 (was 449) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=618 (was 550) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3792 (was 3877) 2024-11-23T03:23:22,295 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=240, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=618, ProcessCount=11, AvailableMemoryMB=3792 2024-11-23T03:23:22,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T03:23:22,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:23:22,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:22,298 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T03:23:22,298 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:22,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 151 2024-11-23T03:23:22,299 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T03:23:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T03:23:22,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742424_1600 (size=960) 2024-11-23T03:23:22,309 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 
badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417 2024-11-23T03:23:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742425_1601 (size=53) 2024-11-23T03:23:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T03:23:22,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing badf71fee211af0b660972c46cad8684, disabling compactions & flushes 2024-11-23T03:23:22,717 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. after waiting 0 ms 2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:22,717 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
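[Editor's note] The backup.HFileArchiver entries earlier in this section never delete store files outright; each file is moved so that its path keeps the same suffix under an archive/ prefix (data/default/<table>/<region>/<family>/<hfile> becomes archive/data/default/<table>/<region>/<family>/<hfile>). The short Java sketch below only illustrates that path rewrite as observed in these log lines; it is not the HBase implementation, and the example path in the comment is copied from the log.

    // Illustrative sketch only: mirrors the data -> archive path rewrite visible in the
    // backup.HFileArchiver entries above. Not HBase code.
    static String toArchivePath(String storeFilePath) {
      String marker = "/data/default/";
      int i = storeFilePath.indexOf(marker);
      if (i < 0) {
        throw new IllegalArgumentException("not a store file path: " + storeFilePath);
      }
      // .../cc4ec11ea417/data/default/TestAcidGuarantees/<region>/B/<hfile>
      // -> .../cc4ec11ea417/archive/data/default/TestAcidGuarantees/<region>/B/<hfile>
      return storeFilePath.substring(0, i) + "/archive" + storeFilePath.substring(i);
    }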
2024-11-23T03:23:22,717 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:22,719 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T03:23:22,719 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732332202719"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732332202719"}]},"ts":"1732332202719"} 2024-11-23T03:23:22,720 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T03:23:22,721 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T03:23:22,721 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332202721"}]},"ts":"1732332202721"} 2024-11-23T03:23:22,722 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T03:23:22,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, ASSIGN}] 2024-11-23T03:23:22,728 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, ASSIGN 2024-11-23T03:23:22,729 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, ASSIGN; state=OFFLINE, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=false 2024-11-23T03:23:22,880 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:22,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; OpenRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:23:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T03:23:23,033 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:23,038 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
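[Editor's note] The CreateTableProcedure entries just above log the full descriptor of the re-created table: compacting memstore type BASIC, plus families A, B and C with VERSIONS => '1' and a 64 KB block size. Purely as a hedged sketch (assuming an already-open HBase 2.x client Connection named conn, which is not part of this log), an equivalent schema could be requested through the Admin API along these lines:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch only: builds a descriptor matching the one logged above.
    static void createTestTable(Connection conn) throws Exception {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)        // VERSIONS => '1'
                .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                .build());
      }
      try (Admin admin = conn.getAdmin()) {
        admin.createTable(table.build()); // returns once the CreateTableProcedure completes
      }
    }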
2024-11-23T03:23:23,038 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7285): Opening region: {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:23:23,039 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,039 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:23:23,039 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7327): checking encryption for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,039 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7330): checking classloading for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,041 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,043 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:23,043 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName A 2024-11-23T03:23:23,043 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:23,044 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:23,044 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,045 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:23,045 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName B 2024-11-23T03:23:23,045 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:23,046 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:23,046 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,047 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:23,047 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName C 2024-11-23T03:23:23,048 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:23,048 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:23,048 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:23,049 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,049 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,052 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:23:23,053 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1085): writing seq id for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,055 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T03:23:23,055 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1102): Opened badf71fee211af0b660972c46cad8684; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69373933, jitterRate=0.03375215828418732}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:23:23,056 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1001): Region open journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:23,056 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., pid=153, masterSystemTime=1732332203033 2024-11-23T03:23:23,058 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:23,058 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
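[Editorial note] The CompactionConfiguration values printed while each store (A, B, C) opens above are the stock exploring-compaction defaults. If they needed tuning, the usual knobs are the standard configuration properties sketched below; the mapping of log fields to property names is my reading of the HBase 2.x defaults, not something stated in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values below match what CompactionConfiguration(181) prints during store open.
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);           // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);   // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5F);    // major jitter
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}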
2024-11-23T03:23:23,058 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=OPEN, openSeqNum=2, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:23,060 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-23T03:23:23,060 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; OpenRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 in 178 msec 2024-11-23T03:23:23,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-23T03:23:23,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, ASSIGN in 333 msec 2024-11-23T03:23:23,062 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T03:23:23,062 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332203062"}]},"ts":"1732332203062"} 2024-11-23T03:23:23,063 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T03:23:23,065 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T03:23:23,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 768 msec 2024-11-23T03:23:23,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T03:23:23,403 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-23T03:23:23,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cbfd84f to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2209c520 2024-11-23T03:23:23,411 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765d46a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:23,412 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:23,414 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:23,415 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T03:23:23,416 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55988, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T03:23:23,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T03:23:23,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T03:23:23,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742426_1602 (size=996) 2024-11-23T03:23:23,828 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T03:23:23,828 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T03:23:23,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:23:23,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, REOPEN/MOVE}] 2024-11-23T03:23:23,833 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, REOPEN/MOVE 2024-11-23T03:23:23,833 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:23,834 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:23:23,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:23:23,986 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:23,986 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,986 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:23:23,986 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing badf71fee211af0b660972c46cad8684, disabling compactions & flushes 2024-11-23T03:23:23,986 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:23,986 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:23,986 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. after waiting 0 ms 2024-11-23T03:23:23,986 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
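[Editorial note] The pid=154 ModifyTableProcedure above rewrites the table descriptor so that family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ReopenTableRegionsProcedure / REOPEN/MOVE transition that follows closes and reopens the region so the new descriptor takes effect. A hedged sketch of issuing the same kind of change through the Admin API follows; the test itself modifies the whole table descriptor, so this per-family form is only the shortest equivalent.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // Rebuild family 'A' with MOB enabled and the 4-byte threshold seen in the
      // target descriptor of the logged modify-table request.
      ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      // On the master this drives a ModifyTableProcedure plus a region reopen,
      // analogous to pid=154/155/156 in this log.
      admin.modifyColumnFamily(tn, a);
    }
  }
}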
2024-11-23T03:23:23,990 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T03:23:23,991 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:23,991 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:23,991 WARN [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionServer(3786): Not adding moved region record: badf71fee211af0b660972c46cad8684 to self. 2024-11-23T03:23:23,992 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed badf71fee211af0b660972c46cad8684 2024-11-23T03:23:23,993 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=CLOSED 2024-11-23T03:23:23,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-23T03:23:23,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 in 162 msec 2024-11-23T03:23:23,998 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, REOPEN/MOVE; state=CLOSED, location=0d51875c74df,34141,1732332039937; forceNewPlan=false, retain=true 2024-11-23T03:23:24,148 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=OPENING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=156, state=RUNNABLE; OpenRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:23:24,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,305 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:24,305 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7285): Opening region: {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} 2024-11-23T03:23:24,305 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,305 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T03:23:24,306 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7327): checking encryption for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,306 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7330): checking classloading for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,307 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,308 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:24,308 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName A 2024-11-23T03:23:24,309 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:24,309 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:24,310 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,310 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:24,310 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName B 2024-11-23T03:23:24,310 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:24,311 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:24,311 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,311 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T03:23:24,312 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region badf71fee211af0b660972c46cad8684 columnFamilyName C 2024-11-23T03:23:24,312 DEBUG [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:24,312 INFO [StoreOpener-badf71fee211af0b660972c46cad8684-1 {}] regionserver.HStore(327): Store=badf71fee211af0b660972c46cad8684/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T03:23:24,312 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,313 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,314 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,315 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T03:23:24,317 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1085): writing seq id for badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,317 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1102): Opened badf71fee211af0b660972c46cad8684; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62309542, jitterRate=-0.07151547074317932}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T03:23:24,318 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1001): Region open journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:24,318 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., pid=158, masterSystemTime=1732332204302 2024-11-23T03:23:24,320 DEBUG [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,320 INFO [RS_OPEN_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
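[Editorial note] Once the region is reopened, the workload threads start writing and the master requests a flush (pid=159/160 below). Because the test table sets MEMSTORE_FLUSHSIZE to 131072 bytes (the TableDescriptorChecker warning earlier), the per-region blocking limit works out to 131072 x 4 = 512 K under the default hbase.hregion.memstore.block.multiplier of 4, which matches the "Over memstore limit=512.0 K" RegionTooBusyException the handlers keep returning below until the flush drains the memstore. A minimal sketch of a writer that backs off on that rejection follows; in practice the HBase client's configured retry policy normally absorbs these, so the explicit loop is only for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the cells visible in the flush output (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          // While the region memstore is over its blocking limit, the server rejects the
          // write with RegionTooBusyException (surfaced to the caller as an IOException).
          table.put(put);
          break;
        } catch (IOException e) {
          if (++attempts >= 5) {
            throw e;                     // give up after a few tries
          }
          Thread.sleep(100L * attempts); // back off while the flush drains the memstore
        }
      }
    }
  }
}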
2024-11-23T03:23:24,321 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=OPEN, openSeqNum=5, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-11-23T03:23:24,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; OpenRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 in 172 msec 2024-11-23T03:23:24,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-23T03:23:24,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, REOPEN/MOVE in 491 msec 2024-11-23T03:23:24,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-23T03:23:24,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 494 msec 2024-11-23T03:23:24,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 908 msec 2024-11-23T03:23:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-23T03:23:24,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fb684eb to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@537a66f8 2024-11-23T03:23:24,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac53e79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,335 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-11-23T03:23:24,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,339 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-11-23T03:23:24,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,343 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf 
to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-11-23T03:23:24,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-11-23T03:23:24,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,353 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-23T03:23:24,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,358 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-11-23T03:23:24,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,369 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-11-23T03:23:24,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,377 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-11-23T03:23:24,380 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,380 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:61411 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-11-23T03:23:24,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T03:23:24,392 DEBUG [hconnection-0x5f157888-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,393 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,396 DEBUG [hconnection-0x1fbe8f8f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,396 DEBUG [hconnection-0x4d7b92e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,397 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,397 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-23T03:23:24,404 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:24,405 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:24,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T03:23:24,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, 
store=A 2024-11-23T03:23:24,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:24,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:24,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:24,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:24,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:24,408 DEBUG [hconnection-0x43e3b84e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,409 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,410 DEBUG [hconnection-0x2c89bdda-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,410 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,412 DEBUG [hconnection-0x3abaf249-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,413 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,421 DEBUG [hconnection-0xa0b662-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,422 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,424 DEBUG [hconnection-0x59c5929d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,425 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332264425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332264425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332264426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332264426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,428 DEBUG [hconnection-0x2e7e0cf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,429 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332264431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,432 DEBUG [hconnection-0x23346561-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T03:23:24,433 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T03:23:24,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e729150cda9f401e9f8b3b90814780dd_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332204404/Put/seqid=0 2024-11-23T03:23:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:24,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742427_1603 (size=12154) 2024-11-23T03:23:24,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332264527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332264528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332264529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332264529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332264534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:24,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332264731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332264731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332264732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332264733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332264736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:24,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:24,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:24,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:24,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:24,920 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:24,924 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e729150cda9f401e9f8b3b90814780dd_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e729150cda9f401e9f8b3b90814780dd_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:24,925 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/dda6f305b7294d48bd16cdc402d2f7ce, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:24,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/dda6f305b7294d48bd16cdc402d2f7ce is 175, key is test_row_0/A:col10/1732332204404/Put/seqid=0 2024-11-23T03:23:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742428_1604 (size=30955) 2024-11-23T03:23:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:25,018 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:25,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:25,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:25,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332265035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332265036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332265036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332265037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332265039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:25,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:25,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:25,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:25,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,366 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/dda6f305b7294d48bd16cdc402d2f7ce 2024-11-23T03:23:25,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/5b8013494fe6414ca933c4e11e68817d is 50, key is test_row_0/B:col10/1732332204404/Put/seqid=0 2024-11-23T03:23:25,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742429_1605 (size=12001) 2024-11-23T03:23:25,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/5b8013494fe6414ca933c4e11e68817d 2024-11-23T03:23:25,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:25,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:25,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/2be0c590876447d78b7f0c7f00f74ca7 is 50, key is test_row_0/C:col10/1732332204404/Put/seqid=0 2024-11-23T03:23:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:25,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742430_1606 (size=12001) 2024-11-23T03:23:25,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/2be0c590876447d78b7f0c7f00f74ca7 2024-11-23T03:23:25,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332265542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332265542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332265543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/dda6f305b7294d48bd16cdc402d2f7ce as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce 2024-11-23T03:23:25,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332265545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:25,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332265546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce, entries=150, sequenceid=16, filesize=30.2 K 2024-11-23T03:23:25,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/5b8013494fe6414ca933c4e11e68817d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d 2024-11-23T03:23:25,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T03:23:25,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/2be0c590876447d78b7f0c7f00f74ca7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7 2024-11-23T03:23:25,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T03:23:25,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for badf71fee211af0b660972c46cad8684 in 1165ms, sequenceid=16, compaction requested=false 2024-11-23T03:23:25,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:25,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:25,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T03:23:25,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:25,630 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T03:23:25,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:25,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:25,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:25,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237d98af99484b4c9aaa0cb486561e6899_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332204420/Put/seqid=0 2024-11-23T03:23:25,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742431_1607 (size=12154) 2024-11-23T03:23:26,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:26,080 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237d98af99484b4c9aaa0cb486561e6899_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d98af99484b4c9aaa0cb486561e6899_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:26,081 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/3ff7dc89505c4bcda8fe75ec3ce2f152, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:26,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/3ff7dc89505c4bcda8fe75ec3ce2f152 is 175, key is test_row_0/A:col10/1732332204420/Put/seqid=0 2024-11-23T03:23:26,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742432_1608 (size=30955) 2024-11-23T03:23:26,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:26,530 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/3ff7dc89505c4bcda8fe75ec3ce2f152 2024-11-23T03:23:26,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a is 50, key is test_row_0/B:col10/1732332204420/Put/seqid=0 2024-11-23T03:23:26,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:26,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:26,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332266560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332266562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332266564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332266565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332266568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742433_1609 (size=12001) 2024-11-23T03:23:26,600 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a 2024-11-23T03:23:26,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/ba1b1b78e7924cc3bff2b5b3f68a9942 is 50, key is test_row_0/C:col10/1732332204420/Put/seqid=0 2024-11-23T03:23:26,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742434_1610 (size=12001) 2024-11-23T03:23:26,662 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/ba1b1b78e7924cc3bff2b5b3f68a9942 2024-11-23T03:23:26,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/3ff7dc89505c4bcda8fe75ec3ce2f152 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152 2024-11-23T03:23:26,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332266666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,673 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152, entries=150, sequenceid=40, filesize=30.2 K 2024-11-23T03:23:26,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a 2024-11-23T03:23:26,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332266670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332266670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332266672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,678 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T03:23:26,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332266674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/ba1b1b78e7924cc3bff2b5b3f68a9942 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942 2024-11-23T03:23:26,684 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T03:23:26,686 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for badf71fee211af0b660972c46cad8684 in 1055ms, sequenceid=40, compaction requested=false 2024-11-23T03:23:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-23T03:23:26,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-23T03:23:26,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T03:23:26,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2820 sec 2024-11-23T03:23:26,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.2870 sec 2024-11-23T03:23:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:26,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:26,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:26,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c548ea18f9e54873966a69321df80fb0_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:26,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742435_1611 (size=12154) 2024-11-23T03:23:26,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332266952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332266953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332266954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332266955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:26,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:26,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332266956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332267056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332267057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332267059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332267059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332267059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,201 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T03:23:27,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332267260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332267262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332267264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332267264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332267266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,296 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:27,301 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c548ea18f9e54873966a69321df80fb0_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c548ea18f9e54873966a69321df80fb0_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:27,302 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8a212921431d471086a150659ca40d66, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:27,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8a212921431d471086a150659ca40d66 is 175, key is test_row_0/A:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:27,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742436_1612 (size=30955) 2024-11-23T03:23:27,330 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 
K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8a212921431d471086a150659ca40d66 2024-11-23T03:23:27,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/fc84dca1acb94fb787f8d795c7b84d5e is 50, key is test_row_0/B:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:27,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742437_1613 (size=12001) 2024-11-23T03:23:27,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332267564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332267566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332267567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332267569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:27,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332267570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:27,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/fc84dca1acb94fb787f8d795c7b84d5e 2024-11-23T03:23:27,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1f26faf2bc5743049517c198bb2eb76d is 50, key is test_row_0/C:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:27,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742438_1614 (size=12001) 2024-11-23T03:23:27,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1f26faf2bc5743049517c198bb2eb76d 2024-11-23T03:23:27,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8a212921431d471086a150659ca40d66 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66 2024-11-23T03:23:27,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66, entries=150, sequenceid=54, filesize=30.2 K 2024-11-23T03:23:27,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/fc84dca1acb94fb787f8d795c7b84d5e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e 2024-11-23T03:23:27,857 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T03:23:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1f26faf2bc5743049517c198bb2eb76d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d 2024-11-23T03:23:27,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T03:23:27,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for badf71fee211af0b660972c46cad8684 in 994ms, sequenceid=54, compaction requested=true 2024-11-23T03:23:27,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:27,869 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:27,869 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): 
badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:27,870 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:27,870 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=90.7 K 2024-11-23T03:23:27,870 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:27,870 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:27,870 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=35.2 K 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66] 2024-11-23T03:23:27,870 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b8013494fe6414ca933c4e11e68817d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732332204403 2024-11-23T03:23:27,871 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting dda6f305b7294d48bd16cdc402d2f7ce, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732332204403 2024-11-23T03:23:27,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e5f7d8d8247d4f82b5dd7fb6280f4e2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732332204420 2024-11-23T03:23:27,871 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ff7dc89505c4bcda8fe75ec3ce2f152, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732332204420 2024-11-23T03:23:27,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting fc84dca1acb94fb787f8d795c7b84d5e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:27,871 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a212921431d471086a150659ca40d66, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:27,880 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:27,881 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9a1f214c8d594a40882b7d34a360b3e9 is 50, key is test_row_0/B:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:27,888 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:27,892 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112335ca8f3ec7ef4649917518fbef7b4ce4_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:27,894 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112335ca8f3ec7ef4649917518fbef7b4ce4_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:27,895 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112335ca8f3ec7ef4649917518fbef7b4ce4_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:27,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742439_1615 (size=12104) 2024-11-23T03:23:27,920 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9a1f214c8d594a40882b7d34a360b3e9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a1f214c8d594a40882b7d34a360b3e9 2024-11-23T03:23:27,925 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 9a1f214c8d594a40882b7d34a360b3e9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
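The sequence above is the normal write-path lifecycle: memstore flushes produce new HFiles under .tmp, the files are committed into the store, and ExploringCompactionPolicy then selects the three small files of each family for a minor compaction that rewrites them into one. The same flush and compaction requests can also be issued explicitly through the Admin API; a small sketch follows, assuming a running cluster reachable via the default configuration. Note that compaction requests are queued asynchronously, just as the CompactSplit queue messages above show.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      admin.flush(tn);                         // write the memstores out as HFiles
      admin.compact(tn, Bytes.toBytes("B"));   // request a minor compaction of family B
      // admin.majorCompact(tn);               // or rewrite every store file in one pass
    }
  }
}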
2024-11-23T03:23:27,925 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:27,925 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332207869; duration=0sec 2024-11-23T03:23:27,925 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:27,925 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:27,926 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:27,928 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:27,928 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:27,928 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:27,928 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=35.2 K 2024-11-23T03:23:27,928 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2be0c590876447d78b7f0c7f00f74ca7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732332204403 2024-11-23T03:23:27,929 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ba1b1b78e7924cc3bff2b5b3f68a9942, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732332204420 2024-11-23T03:23:27,929 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f26faf2bc5743049517c198bb2eb76d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:27,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is 
added to blk_1073742440_1616 (size=4469) 2024-11-23T03:23:27,946 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#532 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:27,946 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/74be9a7a00b14b1c9080f6f7a41d74b4 is 50, key is test_row_0/C:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:27,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742441_1617 (size=12104) 2024-11-23T03:23:27,983 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/74be9a7a00b14b1c9080f6f7a41d74b4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/74be9a7a00b14b1c9080f6f7a41d74b4 2024-11-23T03:23:27,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into 74be9a7a00b14b1c9080f6f7a41d74b4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:27,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:27,990 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332207869; duration=0sec 2024-11-23T03:23:27,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:27,990 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:28,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:28,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:28,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112388fbecb400154a4c9a3aeda6c7ffc635_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332206943/Put/seqid=0 2024-11-23T03:23:28,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332268086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332268087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332268088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332268088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332268092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742442_1618 (size=12154) 2024-11-23T03:23:28,138 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:28,144 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112388fbecb400154a4c9a3aeda6c7ffc635_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112388fbecb400154a4c9a3aeda6c7ffc635_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:28,145 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/c1d170afe23d43cfbeadc2b505722727, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:28,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/c1d170afe23d43cfbeadc2b505722727 is 175, key is test_row_0/A:col10/1732332206943/Put/seqid=0 2024-11-23T03:23:28,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332268193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332268195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332268196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332268196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332268197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742443_1619 (size=30955) 2024-11-23T03:23:28,211 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/c1d170afe23d43cfbeadc2b505722727 2024-11-23T03:23:28,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/0d8b6889e65e48a397163068a29d9424 is 50, key is test_row_0/B:col10/1732332206943/Put/seqid=0 2024-11-23T03:23:28,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742444_1620 (size=12001) 2024-11-23T03:23:28,348 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#531 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:28,349 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/77d6d179794d4f9baa47735b58f813bf is 175, key is test_row_0/A:col10/1732332206562/Put/seqid=0 2024-11-23T03:23:28,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742445_1621 (size=31058) 2024-11-23T03:23:28,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332268398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332268399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332268399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332268399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,403 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/77d6d179794d4f9baa47735b58f813bf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf 2024-11-23T03:23:28,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332268403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,408 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 77d6d179794d4f9baa47735b58f813bf(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:28,408 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:28,408 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332207868; duration=0sec 2024-11-23T03:23:28,408 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:28,408 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:28,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T03:23:28,511 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-23T03:23:28,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-23T03:23:28,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T03:23:28,514 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:28,515 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:28,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:28,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T03:23:28,666 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T03:23:28,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:28,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:28,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:28,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:28,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:28,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:28,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/0d8b6889e65e48a397163068a29d9424 2024-11-23T03:23:28,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7adc88bfb22e46e8b3bf8aed3c300e78 is 50, key is test_row_0/C:col10/1732332206943/Put/seqid=0 2024-11-23T03:23:28,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332268701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332268702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332268702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332268703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:28,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332268707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742446_1622 (size=12001) 2024-11-23T03:23:28,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7adc88bfb22e46e8b3bf8aed3c300e78 2024-11-23T03:23:28,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/c1d170afe23d43cfbeadc2b505722727 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727 2024-11-23T03:23:28,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727, entries=150, sequenceid=79, filesize=30.2 K 2024-11-23T03:23:28,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/0d8b6889e65e48a397163068a29d9424 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424 2024-11-23T03:23:28,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T03:23:28,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7adc88bfb22e46e8b3bf8aed3c300e78 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78 2024-11-23T03:23:28,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T03:23:28,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for badf71fee211af0b660972c46cad8684 in 673ms, sequenceid=79, compaction requested=false 2024-11-23T03:23:28,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:28,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T03:23:28,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:28,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T03:23:28,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:28,821 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:28,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:28,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c538f3bd48bf43f99bf71995c3e50cb7_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332208087/Put/seqid=0 2024-11-23T03:23:28,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742447_1623 (size=12154) 2024-11-23T03:23:28,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:28,874 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c538f3bd48bf43f99bf71995c3e50cb7_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c538f3bd48bf43f99bf71995c3e50cb7_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:28,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/0e06602ac0754de08869957f02d6b877, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:28,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/0e06602ac0754de08869957f02d6b877 is 175, key is test_row_0/A:col10/1732332208087/Put/seqid=0 2024-11-23T03:23:28,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742448_1624 (size=30955) 2024-11-23T03:23:28,894 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/0e06602ac0754de08869957f02d6b877 2024-11-23T03:23:28,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9a5eadd947bc47e0b677b94a64ec1f00 is 50, key is test_row_0/B:col10/1732332208087/Put/seqid=0 2024-11-23T03:23:28,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742449_1625 (size=12001) 2024-11-23T03:23:29,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T03:23:29,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:29,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:29,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332269227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332269229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332269232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332269232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332269235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,310 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9a5eadd947bc47e0b677b94a64ec1f00 2024-11-23T03:23:29,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332269334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332269334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/fb2cdd62030d4148bbd66c808283ed72 is 50, key is test_row_0/C:col10/1732332208087/Put/seqid=0 2024-11-23T03:23:29,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332269340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332269341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332269342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742450_1626 (size=12001) 2024-11-23T03:23:29,358 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/fb2cdd62030d4148bbd66c808283ed72 2024-11-23T03:23:29,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/0e06602ac0754de08869957f02d6b877 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877 2024-11-23T03:23:29,369 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877, entries=150, sequenceid=95, filesize=30.2 K 2024-11-23T03:23:29,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9a5eadd947bc47e0b677b94a64ec1f00 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00 2024-11-23T03:23:29,373 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 
{event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00, entries=150, sequenceid=95, filesize=11.7 K 2024-11-23T03:23:29,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/fb2cdd62030d4148bbd66c808283ed72 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72 2024-11-23T03:23:29,377 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72, entries=150, sequenceid=95, filesize=11.7 K 2024-11-23T03:23:29,377 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for badf71fee211af0b660972c46cad8684 in 556ms, sequenceid=95, compaction requested=true 2024-11-23T03:23:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-23T03:23:29,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-23T03:23:29,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-23T03:23:29,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 864 msec 2024-11-23T03:23:29,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 873 msec 2024-11-23T03:23:29,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:29,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:29,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112310105074e36a424daa1163ca6d0b1edb_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:29,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332269552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332269560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332269562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332269562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332269563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742451_1627 (size=14594) 2024-11-23T03:23:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T03:23:29,618 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-23T03:23:29,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-23T03:23:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:29,623 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:29,623 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:29,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:29,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332269662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332269666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332269667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332269667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332269668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:29,775 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:29,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:29,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332269866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332269870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332269870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332269871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:29,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332269871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:29,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:29,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:29,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:29,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:29,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:29,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:29,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:29,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:29,970 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:29,974 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112310105074e36a424daa1163ca6d0b1edb_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112310105074e36a424daa1163ca6d0b1edb_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:29,975 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/45986f38c56445aa9ab5f5acf973bace, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:29,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/45986f38c56445aa9ab5f5acf973bace is 175, key is test_row_0/A:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:29,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742452_1628 (size=39549) 2024-11-23T03:23:29,998 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/45986f38c56445aa9ab5f5acf973bace 2024-11-23T03:23:30,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8062376aacbf4d7f907dc3ce2eb4429c is 50, key is test_row_0/B:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:30,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742453_1629 (size=12001) 2024-11-23T03:23:30,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8062376aacbf4d7f907dc3ce2eb4429c 2024-11-23T03:23:30,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5d6e68e462c642a0a56eca93299e3373 is 50, key is test_row_0/C:col10/1732332209232/Put/seqid=0 
2024-11-23T03:23:30,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:30,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742454_1630 (size=12001) 2024-11-23T03:23:30,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5d6e68e462c642a0a56eca93299e3373 2024-11-23T03:23:30,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/45986f38c56445aa9ab5f5acf973bace as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace 2024-11-23T03:23:30,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace, entries=200, sequenceid=119, filesize=38.6 K 2024-11-23T03:23:30,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8062376aacbf4d7f907dc3ce2eb4429c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c 2024-11-23T03:23:30,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T03:23:30,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5d6e68e462c642a0a56eca93299e3373 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373 2024-11-23T03:23:30,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T03:23:30,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for badf71fee211af0b660972c46cad8684 in 604ms, sequenceid=119, compaction requested=true 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:30,145 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:30,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T03:23:30,145 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:30,148 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:30,149 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:30,149 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:30,149 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a1f214c8d594a40882b7d34a360b3e9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=47.0 K 2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a1f214c8d594a40882b7d34a360b3e9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:30,150 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,150 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=129.4 K 2024-11-23T03:23:30,150 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace] 2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d8b6889e65e48a397163068a29d9424, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332206943 2024-11-23T03:23:30,150 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d6d179794d4f9baa47735b58f813bf, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:30,151 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a5eadd947bc47e0b677b94a64ec1f00, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732332208080 2024-11-23T03:23:30,151 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c1d170afe23d43cfbeadc2b505722727, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332206943 2024-11-23T03:23:30,152 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e06602ac0754de08869957f02d6b877, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732332208080 2024-11-23T03:23:30,153 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8062376aacbf4d7f907dc3ce2eb4429c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209232 2024-11-23T03:23:30,155 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 45986f38c56445aa9ab5f5acf973bace, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209226 2024-11-23T03:23:30,170 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#542 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:30,171 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/643dfa6ccf6e4b1a847db73527b541c6 is 50, key is test_row_0/B:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:30,174 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:23:30,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:30,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:30,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:30,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:30,188 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411232f391c69ecaa4290be3ecd6008e3cba2_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,191 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411232f391c69ecaa4290be3ecd6008e3cba2_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,191 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232f391c69ecaa4290be3ecd6008e3cba2_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332270203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332270204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332270205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332270205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332270206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742455_1631 (size=12241) 2024-11-23T03:23:30,224 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/643dfa6ccf6e4b1a847db73527b541c6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/643dfa6ccf6e4b1a847db73527b541c6 2024-11-23T03:23:30,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:30,229 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 643dfa6ccf6e4b1a847db73527b541c6(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:30,229 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:30,229 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=12, startTime=1732332210145; duration=0sec 2024-11-23T03:23:30,229 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:30,230 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:30,230 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:30,233 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:30,233 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:30,233 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,233 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/74be9a7a00b14b1c9080f6f7a41d74b4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=47.0 K 2024-11-23T03:23:30,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235392907560aa4055b3d1b4547ce68af1_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332210173/Put/seqid=0 2024-11-23T03:23:30,234 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74be9a7a00b14b1c9080f6f7a41d74b4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732332206562 2024-11-23T03:23:30,234 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 7adc88bfb22e46e8b3bf8aed3c300e78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732332206943 2024-11-23T03:23:30,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb2cdd62030d4148bbd66c808283ed72, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732332208080 2024-11-23T03:23:30,235 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d6e68e462c642a0a56eca93299e3373, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209232 2024-11-23T03:23:30,238 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:30,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,246 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#545 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:30,246 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d9aae13c0cbf4284a2a031faa35ccc10 is 50, key is test_row_0/C:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742456_1632 (size=4469) 2024-11-23T03:23:30,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742457_1633 (size=14744) 2024-11-23T03:23:30,283 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:30,286 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235392907560aa4055b3d1b4547ce68af1_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235392907560aa4055b3d1b4547ce68af1_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:30,288 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/6dbff4e442ab4f5f97a3771d3cd79b37, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/6dbff4e442ab4f5f97a3771d3cd79b37 is 175, key is test_row_0/A:col10/1732332210173/Put/seqid=0 2024-11-23T03:23:30,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332270311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332270311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332270311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742458_1634 (size=12241) 2024-11-23T03:23:30,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332270312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332270314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742459_1635 (size=39699) 2024-11-23T03:23:30,329 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/6dbff4e442ab4f5f97a3771d3cd79b37 2024-11-23T03:23:30,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8c9706d9092c4ef190a5ed0f2a2da8d3 is 50, key is test_row_0/B:col10/1732332210173/Put/seqid=0 2024-11-23T03:23:30,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742460_1636 (size=12101) 2024-11-23T03:23:30,392 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:30,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:30,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332270514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332270515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332270516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332270517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332270519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:30,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,659 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#543 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:30,659 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/810e4746a93e4f6cb160720f031cb566 is 175, key is test_row_0/A:col10/1732332209232/Put/seqid=0 2024-11-23T03:23:30,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742461_1637 (size=31195) 2024-11-23T03:23:30,698 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
as already flushing 2024-11-23T03:23:30,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:30,721 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d9aae13c0cbf4284a2a031faa35ccc10 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d9aae13c0cbf4284a2a031faa35ccc10 2024-11-23T03:23:30,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:30,726 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into d9aae13c0cbf4284a2a031faa35ccc10(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:30,726 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:30,726 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=12, startTime=1732332210145; duration=0sec 2024-11-23T03:23:30,726 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:30,726 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:30,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8c9706d9092c4ef190a5ed0f2a2da8d3 2024-11-23T03:23:30,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/3bebdd67e9794226b582c1b6acf92661 is 50, key is test_row_0/C:col10/1732332210173/Put/seqid=0 2024-11-23T03:23:30,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742462_1638 (size=12101) 2024-11-23T03:23:30,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/3bebdd67e9794226b582c1b6acf92661 2024-11-23T03:23:30,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/6dbff4e442ab4f5f97a3771d3cd79b37 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37 2024-11-23T03:23:30,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332270820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332270820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332270823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332270823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:30,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332270825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37, entries=200, sequenceid=133, filesize=38.8 K 2024-11-23T03:23:30,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8c9706d9092c4ef190a5ed0f2a2da8d3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3 2024-11-23T03:23:30,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3, entries=150, sequenceid=133, filesize=11.8 K 2024-11-23T03:23:30,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/3bebdd67e9794226b582c1b6acf92661 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661 2024-11-23T03:23:30,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661, entries=150, sequenceid=133, filesize=11.8 K 2024-11-23T03:23:30,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for badf71fee211af0b660972c46cad8684 in 664ms, sequenceid=133, compaction requested=false 2024-11-23T03:23:30,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:30,851 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:30,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:30,852 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:30,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:30,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123def1a6b55a204f6da3bf7322ca65e789_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332210203/Put/seqid=0 2024-11-23T03:23:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742463_1639 (size=12304) 2024-11-23T03:23:30,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:30,894 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123def1a6b55a204f6da3bf7322ca65e789_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123def1a6b55a204f6da3bf7322ca65e789_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:30,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82fa4a1d67714cb4b37854a49520a907, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:30,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82fa4a1d67714cb4b37854a49520a907 is 175, key is test_row_0/A:col10/1732332210203/Put/seqid=0 2024-11-23T03:23:30,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742464_1640 (size=31105) 2024-11-23T03:23:30,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82fa4a1d67714cb4b37854a49520a907 2024-11-23T03:23:30,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/080a271e5b314afb968c9c684b6dde6e is 50, key is test_row_0/B:col10/1732332210203/Put/seqid=0 2024-11-23T03:23:30,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742465_1641 (size=12151) 2024-11-23T03:23:31,098 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/810e4746a93e4f6cb160720f031cb566 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566 2024-11-23T03:23:31,106 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 810e4746a93e4f6cb160720f031cb566(size=30.5 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:31,106 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:31,106 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=12, startTime=1732332210145; duration=0sec 2024-11-23T03:23:31,106 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:31,106 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:31,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:31,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:31,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332271339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332271339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332271340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332271341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332271343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,372 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/080a271e5b314afb968c9c684b6dde6e 2024-11-23T03:23:31,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1b871c78c01a4bceaf8d1d2dfa418caa is 50, key is test_row_0/C:col10/1732332210203/Put/seqid=0 2024-11-23T03:23:31,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742466_1642 (size=12151) 2024-11-23T03:23:31,410 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1b871c78c01a4bceaf8d1d2dfa418caa 2024-11-23T03:23:31,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82fa4a1d67714cb4b37854a49520a907 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907 2024-11-23T03:23:31,427 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907, entries=150, sequenceid=157, filesize=30.4 K 2024-11-23T03:23:31,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/080a271e5b314afb968c9c684b6dde6e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e 2024-11-23T03:23:31,431 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T03:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/1b871c78c01a4bceaf8d1d2dfa418caa as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa 2024-11-23T03:23:31,438 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T03:23:31,439 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for badf71fee211af0b660972c46cad8684 in 587ms, sequenceid=157, compaction requested=true 2024-11-23T03:23:31,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:31,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:31,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-23T03:23:31,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-23T03:23:31,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-23T03:23:31,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8180 sec 2024-11-23T03:23:31,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.8240 sec 2024-11-23T03:23:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:31,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:31,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238f000e2ce9e24def9fe83be08f457e17_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742467_1643 (size=12304) 2024-11-23T03:23:31,471 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:31,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332271468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332271470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,476 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238f000e2ce9e24def9fe83be08f457e17_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238f000e2ce9e24def9fe83be08f457e17_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:31,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332271472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332271474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332271473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,478 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b50aaa6e660847c6904afbf6cb1a74db, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b50aaa6e660847c6904afbf6cb1a74db is 175, key is test_row_0/A:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742468_1644 (size=31105) 2024-11-23T03:23:31,507 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b50aaa6e660847c6904afbf6cb1a74db 2024-11-23T03:23:31,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/4fd90677428945e7bc29eb1f992a8a3f is 50, key is test_row_0/B:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742469_1645 (size=12151) 2024-11-23T03:23:31,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/4fd90677428945e7bc29eb1f992a8a3f 2024-11-23T03:23:31,556 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/a08291b3593b4e529a21364dbb7418bf is 50, key is test_row_0/C:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742470_1646 (size=12151) 2024-11-23T03:23:31,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/a08291b3593b4e529a21364dbb7418bf 2024-11-23T03:23:31,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332271575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332271577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b50aaa6e660847c6904afbf6cb1a74db as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db 2024-11-23T03:23:31,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332271580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332271580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332271580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db, entries=150, sequenceid=173, filesize=30.4 K 2024-11-23T03:23:31,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/4fd90677428945e7bc29eb1f992a8a3f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f 2024-11-23T03:23:31,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f, entries=150, sequenceid=173, filesize=11.9 K 2024-11-23T03:23:31,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/a08291b3593b4e529a21364dbb7418bf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf 2024-11-23T03:23:31,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf, entries=150, sequenceid=173, filesize=11.9 K 2024-11-23T03:23:31,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for badf71fee211af0b660972c46cad8684 in 156ms, sequenceid=173, 
compaction requested=true 2024-11-23T03:23:31,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:31,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:31,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:31,603 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:31,603 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:31,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:31,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:31,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:31,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:31,608 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:31,608 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:31,609 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:31,609 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/643dfa6ccf6e4b1a847db73527b541c6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=47.5 K 2024-11-23T03:23:31,609 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133104 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:31,609 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:31,609 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:31,609 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=130.0 K 2024-11-23T03:23:31,609 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:31,609 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db] 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 643dfa6ccf6e4b1a847db73527b541c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209232 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 810e4746a93e4f6cb160720f031cb566, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209232 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c9706d9092c4ef190a5ed0f2a2da8d3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732332209550 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dbff4e442ab4f5f97a3771d3cd79b37, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732332209550 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 080a271e5b314afb968c9c684b6dde6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732332210203 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82fa4a1d67714cb4b37854a49520a907, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732332210203 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fd90677428945e7bc29eb1f992a8a3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:31,610 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b50aaa6e660847c6904afbf6cb1a74db, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:31,635 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#554 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:31,635 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/f7ecba21e769401c804c33fc021c76a4 is 50, key is test_row_0/B:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,640 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,648 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123819189bf59024c1a9cef73c476308709_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,651 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123819189bf59024c1a9cef73c476308709_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,651 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123819189bf59024c1a9cef73c476308709_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742471_1647 (size=12527) 2024-11-23T03:23:31,681 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/f7ecba21e769401c804c33fc021c76a4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f7ecba21e769401c804c33fc021c76a4 2024-11-23T03:23:31,688 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into f7ecba21e769401c804c33fc021c76a4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:31,688 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:31,688 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=12, startTime=1732332211603; duration=0sec 2024-11-23T03:23:31,688 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:31,688 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:31,689 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:31,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742472_1648 (size=4469) 2024-11-23T03:23:31,698 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:31,698 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:31,698 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:31,699 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d9aae13c0cbf4284a2a031faa35ccc10, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=47.5 K 2024-11-23T03:23:31,699 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d9aae13c0cbf4284a2a031faa35ccc10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732332209232 2024-11-23T03:23:31,699 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bebdd67e9794226b582c1b6acf92661, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732332209550 2024-11-23T03:23:31,700 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b871c78c01a4bceaf8d1d2dfa418caa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732332210203 2024-11-23T03:23:31,701 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a08291b3593b4e529a21364dbb7418bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:31,714 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#556 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:31,715 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/2deb66fae83f4d34a4d6b53bc0f288d1 is 50, key is test_row_0/C:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T03:23:31,727 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-23T03:23:31,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-23T03:23:31,731 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:31,731 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:31,731 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:31,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:31,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:31,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:31,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742473_1649 (size=12527) 
2024-11-23T03:23:31,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332271793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332271794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332271795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332271798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332271799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cf1ac1466b6f4b9ca98c0f8abf8ff6f3_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332211468/Put/seqid=0 2024-11-23T03:23:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:31,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742474_1650 (size=14794) 2024-11-23T03:23:31,869 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:31,875 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cf1ac1466b6f4b9ca98c0f8abf8ff6f3_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cf1ac1466b6f4b9ca98c0f8abf8ff6f3_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:31,875 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee2427732d1542c7b9ca584b07c5c38f, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:31,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee2427732d1542c7b9ca584b07c5c38f is 175, key is test_row_0/A:col10/1732332211468/Put/seqid=0 2024-11-23T03:23:31,884 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T03:23:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:31,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:31,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332271900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332271901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332271901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332271905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742475_1651 (size=39749) 2024-11-23T03:23:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332271906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:31,912 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee2427732d1542c7b9ca584b07c5c38f 2024-11-23T03:23:31,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8dd548c7fbd542459c3e467e6e5be6f1 is 50, key is test_row_0/B:col10/1732332211468/Put/seqid=0 2024-11-23T03:23:31,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742476_1652 (size=12151) 2024-11-23T03:23:31,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8dd548c7fbd542459c3e467e6e5be6f1 2024-11-23T03:23:31,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/784c2400359045a28718769a0e09f7fc is 50, key is test_row_0/C:col10/1732332211468/Put/seqid=0 2024-11-23T03:23:32,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742477_1653 (size=12151) 2024-11-23T03:23:32,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/784c2400359045a28718769a0e09f7fc 2024-11-23T03:23:32,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee2427732d1542c7b9ca584b07c5c38f as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f 2024-11-23T03:23:32,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f, entries=200, sequenceid=197, filesize=38.8 K 2024-11-23T03:23:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:32,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8dd548c7fbd542459c3e467e6e5be6f1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1 2024-11-23T03:23:32,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1, entries=150, sequenceid=197, filesize=11.9 K 2024-11-23T03:23:32,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T03:23:32,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/784c2400359045a28718769a0e09f7fc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc 2024-11-23T03:23:32,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:32,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:32,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc, entries=150, sequenceid=197, filesize=11.9 K 2024-11-23T03:23:32,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for badf71fee211af0b660972c46cad8684 in 266ms, sequenceid=197, compaction requested=false 2024-11-23T03:23:32,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,091 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#555 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:32,092 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/7ad3237426384b4d8d87742ffb9d32bd is 175, key is test_row_0/A:col10/1732332211341/Put/seqid=0 2024-11-23T03:23:32,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742478_1654 (size=31481) 2024-11-23T03:23:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:32,117 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/7ad3237426384b4d8d87742ffb9d32bd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd 2024-11-23T03:23:32,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230de11b4929a043759f5bd211ab45eb81_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,123 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 7ad3237426384b4d8d87742ffb9d32bd(size=30.7 K), total size for store is 69.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:32,123 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,123 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=12, startTime=1732332211603; duration=0sec 2024-11-23T03:23:32,123 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:32,123 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742479_1655 (size=14794) 2024-11-23T03:23:32,148 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:32,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332272143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332272144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332272146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332272147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332272147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,160 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230de11b4929a043759f5bd211ab45eb81_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230de11b4929a043759f5bd211ab45eb81_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:32,162 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/4b734440f88547999449951199ee5abc, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/4b734440f88547999449951199ee5abc is 175, key is test_row_0/A:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742480_1656 (size=39749) 2024-11-23T03:23:32,186 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/4b734440f88547999449951199ee5abc 2024-11-23T03:23:32,191 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/2deb66fae83f4d34a4d6b53bc0f288d1 as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2deb66fae83f4d34a4d6b53bc0f288d1 2024-11-23T03:23:32,191 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T03:23:32,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:32,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:32,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/f32cf223bbef4208bc419206f10efee5 is 50, key is test_row_0/B:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,203 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into 2deb66fae83f4d34a4d6b53bc0f288d1(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:32,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,203 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=12, startTime=1732332211604; duration=0sec 2024-11-23T03:23:32,203 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:32,204 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:32,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742481_1657 (size=12151) 2024-11-23T03:23:32,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/f32cf223bbef4208bc419206f10efee5 2024-11-23T03:23:32,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/655df039d757483a91fed56be81e4264 is 50, key is test_row_0/C:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332272254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332272254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332272255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332272257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332272257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742482_1658 (size=12151) 2024-11-23T03:23:32,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/655df039d757483a91fed56be81e4264 2024-11-23T03:23:32,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/4b734440f88547999449951199ee5abc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc 2024-11-23T03:23:32,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc, entries=200, sequenceid=211, filesize=38.8 K 2024-11-23T03:23:32,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/f32cf223bbef4208bc419206f10efee5 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5 2024-11-23T03:23:32,285 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5, entries=150, sequenceid=211, filesize=11.9 K 2024-11-23T03:23:32,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/655df039d757483a91fed56be81e4264 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264 2024-11-23T03:23:32,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264, entries=150, sequenceid=211, filesize=11.9 K 2024-11-23T03:23:32,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for badf71fee211af0b660972c46cad8684 in 189ms, sequenceid=211, compaction requested=true 2024-11-23T03:23:32,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,297 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:32,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:32,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:32,298 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:32,298 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110979 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:32,298 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:32,298 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:32,298 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=108.4 K 2024-11-23T03:23:32,298 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,298 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc] 2024-11-23T03:23:32,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:32,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:32,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:32,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:32,300 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:32,300 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:32,300 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ad3237426384b4d8d87742ffb9d32bd, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, 
compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:32,300 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,300 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f7ecba21e769401c804c33fc021c76a4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.0 K 2024-11-23T03:23:32,301 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ecba21e769401c804c33fc021c76a4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:32,302 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee2427732d1542c7b9ca584b07c5c38f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732332211460 2024-11-23T03:23:32,303 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dd548c7fbd542459c3e467e6e5be6f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732332211468 2024-11-23T03:23:32,303 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b734440f88547999449951199ee5abc, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211789 2024-11-23T03:23:32,303 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting f32cf223bbef4208bc419206f10efee5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211797 2024-11-23T03:23:32,310 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,311 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#563 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:32,312 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/478f005c6f324be597b8e37c9805d9f6 is 50, key is test_row_0/B:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,316 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123d464def9c59c4c62a276c22aa88b038b_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,319 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123d464def9c59c4c62a276c22aa88b038b_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,319 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d464def9c59c4c62a276c22aa88b038b_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:32,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T03:23:32,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:32,345 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:32,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:32,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742483_1659 (size=12629) 2024-11-23T03:23:32,366 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/478f005c6f324be597b8e37c9805d9f6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/478f005c6f324be597b8e37c9805d9f6 2024-11-23T03:23:32,371 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 478f005c6f324be597b8e37c9805d9f6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:32,371 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,371 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332212298; duration=0sec 2024-11-23T03:23:32,371 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:32,371 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:32,371 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:32,373 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:32,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:32,374 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:32,374 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2deb66fae83f4d34a4d6b53bc0f288d1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.0 K 2024-11-23T03:23:32,374 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 2deb66fae83f4d34a4d6b53bc0f288d1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732332211337 2024-11-23T03:23:32,375 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 784c2400359045a28718769a0e09f7fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732332211468 2024-11-23T03:23:32,375 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 655df039d757483a91fed56be81e4264, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211797 2024-11-23T03:23:32,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 
is added to blk_1073742484_1660 (size=4469) 2024-11-23T03:23:32,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112309671c1505cf40fc81e38db2dc4a48cf_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332212146/Put/seqid=0 2024-11-23T03:23:32,400 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:32,400 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/4183b362d5a54c21935a1606467ac1b3 is 50, key is test_row_0/C:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742485_1661 (size=12304) 2024-11-23T03:23:32,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742486_1662 (size=12629) 2024-11-23T03:23:32,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:32,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:32,463 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/4183b362d5a54c21935a1606467ac1b3 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4183b362d5a54c21935a1606467ac1b3 2024-11-23T03:23:32,469 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into 4183b362d5a54c21935a1606467ac1b3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:32,469 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:32,469 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332212299; duration=0sec 2024-11-23T03:23:32,469 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:32,469 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:32,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332272467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332272468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332272471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332272468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332272472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332272573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332272575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332272575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332272575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332272576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332272775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332272777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332272778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332272779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:32,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332272779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:32,785 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#564 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:32,786 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/552c737684b3471e8fb10fbb3ac84743 is 175, key is test_row_0/A:col10/1732332212107/Put/seqid=0 2024-11-23T03:23:32,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742487_1663 (size=31583) 2024-11-23T03:23:32,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:32,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:32,847 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112309671c1505cf40fc81e38db2dc4a48cf_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309671c1505cf40fc81e38db2dc4a48cf_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:32,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8f36de8c3fda481ab38206bc5eed64d7, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:32,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8f36de8c3fda481ab38206bc5eed64d7 is 175, key is test_row_0/A:col10/1732332212146/Put/seqid=0 2024-11-23T03:23:32,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742488_1664 (size=31105) 2024-11-23T03:23:33,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332273079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332273079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332273083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332273085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332273085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,220 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/552c737684b3471e8fb10fbb3ac84743 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743 2024-11-23T03:23:33,226 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 552c737684b3471e8fb10fbb3ac84743(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:33,226 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:33,226 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332212297; duration=0sec 2024-11-23T03:23:33,227 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:33,227 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:33,298 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8f36de8c3fda481ab38206bc5eed64d7 2024-11-23T03:23:33,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/47de8098ed8f4b8391c8ee5cb4650e8f is 50, key is test_row_0/B:col10/1732332212146/Put/seqid=0 2024-11-23T03:23:33,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742489_1665 (size=12151) 2024-11-23T03:23:33,328 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/47de8098ed8f4b8391c8ee5cb4650e8f 2024-11-23T03:23:33,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/65fe5fc6fe3a45059fbccb8f1a7b06fd is 50, key is test_row_0/C:col10/1732332212146/Put/seqid=0 2024-11-23T03:23:33,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742490_1666 (size=12151) 2024-11-23T03:23:33,382 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/65fe5fc6fe3a45059fbccb8f1a7b06fd 2024-11-23T03:23:33,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8f36de8c3fda481ab38206bc5eed64d7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7 2024-11-23T03:23:33,395 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7, entries=150, sequenceid=236, filesize=30.4 K 2024-11-23T03:23:33,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/47de8098ed8f4b8391c8ee5cb4650e8f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f 2024-11-23T03:23:33,402 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f, entries=150, sequenceid=236, filesize=11.9 K 2024-11-23T03:23:33,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/65fe5fc6fe3a45059fbccb8f1a7b06fd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd 2024-11-23T03:23:33,407 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd, entries=150, sequenceid=236, filesize=11.9 K 2024-11-23T03:23:33,412 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for badf71fee211af0b660972c46cad8684 in 1067ms, sequenceid=236, compaction requested=false 2024-11-23T03:23:33,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:33,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:33,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-23T03:23:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-23T03:23:33,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-23T03:23:33,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6830 sec 2024-11-23T03:23:33,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.6870 sec 2024-11-23T03:23:33,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:33,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:33,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:33,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236c29968ca0804b51929f87faf8a5b995_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:33,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742491_1667 (size=12304) 2024-11-23T03:23:33,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332273616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332273620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332273621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332273622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332273623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332273724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332273724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332273725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332273727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332273727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T03:23:33,836 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-23T03:23:33,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-23T03:23:33,839 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:33,839 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:33,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:33,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332273928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332273929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332273930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332273933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332273934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:33,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:33,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T03:23:33,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:33,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:33,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:33,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:33,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,018 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:34,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236c29968ca0804b51929f87faf8a5b995_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c29968ca0804b51929f87faf8a5b995_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:34,023 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82148f723e324a3fb7b8e56592ea6ebd, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:34,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82148f723e324a3fb7b8e56592ea6ebd is 175, key is test_row_0/A:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:34,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742492_1668 (size=31105) 2024-11-23T03:23:34,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:34,144 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T03:23:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:34,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332274233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332274233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332274236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332274238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332274240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T03:23:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:34,448 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82148f723e324a3fb7b8e56592ea6ebd 2024-11-23T03:23:34,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T03:23:34,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:34,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:34,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:34,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/a5da4efae44f426aa61b33c77d06f4c8 is 50, key is test_row_0/B:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:34,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742493_1669 (size=12151) 2024-11-23T03:23:34,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/a5da4efae44f426aa61b33c77d06f4c8 2024-11-23T03:23:34,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5066316a98044c8293cbc2b6f14db5e0 is 50, key is test_row_0/C:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:34,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742494_1670 (size=12151) 2024-11-23T03:23:34,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5066316a98044c8293cbc2b6f14db5e0 2024-11-23T03:23:34,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/82148f723e324a3fb7b8e56592ea6ebd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd 2024-11-23T03:23:34,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd, entries=150, sequenceid=253, filesize=30.4 K 2024-11-23T03:23:34,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/a5da4efae44f426aa61b33c77d06f4c8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8 2024-11-23T03:23:34,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8, entries=150, sequenceid=253, filesize=11.9 K 2024-11-23T03:23:34,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/5066316a98044c8293cbc2b6f14db5e0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0 2024-11-23T03:23:34,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0, entries=150, sequenceid=253, filesize=11.9 K 2024-11-23T03:23:34,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for badf71fee211af0b660972c46cad8684 in 1013ms, sequenceid=253, compaction requested=true 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:34,600 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:34,600 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:34,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:34,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T03:23:34,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,603 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:34,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:34,603 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:34,604 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:34,604 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,604 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/478f005c6f324be597b8e37c9805d9f6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.1 K 2024-11-23T03:23:34,606 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 478f005c6f324be597b8e37c9805d9f6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211797 2024-11-23T03:23:34,606 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:34,606 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:34,606 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,606 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=91.6 K 2024-11-23T03:23:34,606 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,606 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd] 2024-11-23T03:23:34,608 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 47de8098ed8f4b8391c8ee5cb4650e8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332212142 2024-11-23T03:23:34,608 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 552c737684b3471e8fb10fbb3ac84743, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211797 2024-11-23T03:23:34,608 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting a5da4efae44f426aa61b33c77d06f4c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:34,609 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f36de8c3fda481ab38206bc5eed64d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332212142 2024-11-23T03:23:34,610 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82148f723e324a3fb7b8e56592ea6ebd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:34,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a2a8d0593d87415a99518d4e4c7e07af_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332213619/Put/seqid=0 2024-11-23T03:23:34,652 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:34,670 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#574 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:34,671 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/85de7464d0d1499088a9c0154f8cbc20 is 50, key is test_row_0/B:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:34,680 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112351b78133bb6c4b29a0cc78c8b4673c6b_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:34,683 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112351b78133bb6c4b29a0cc78c8b4673c6b_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:34,686 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112351b78133bb6c4b29a0cc78c8b4673c6b_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:34,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742495_1671 (size=12454) 2024-11-23T03:23:34,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742496_1672 (size=12731) 2024-11-23T03:23:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742497_1673 (size=4469) 2024-11-23T03:23:34,717 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/85de7464d0d1499088a9c0154f8cbc20 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/85de7464d0d1499088a9c0154f8cbc20 2024-11-23T03:23:34,724 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 85de7464d0d1499088a9c0154f8cbc20(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:34,724 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:34,724 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332214600; duration=0sec 2024-11-23T03:23:34,724 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:34,724 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:34,724 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:34,726 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:34,726 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:34,726 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:34,726 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4183b362d5a54c21935a1606467ac1b3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.1 K 2024-11-23T03:23:34,727 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4183b362d5a54c21935a1606467ac1b3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732332211797 2024-11-23T03:23:34,727 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 65fe5fc6fe3a45059fbccb8f1a7b06fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732332212142 2024-11-23T03:23:34,727 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 5066316a98044c8293cbc2b6f14db5e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:34,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:34,749 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#575 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:34,749 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/ee9ce258cea94fcfb64306a67d676c47 is 50, key is test_row_0/C:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:34,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742498_1674 (size=12731) 2024-11-23T03:23:34,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332274755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332274756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332274755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332274760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332274760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,770 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/ee9ce258cea94fcfb64306a67d676c47 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ee9ce258cea94fcfb64306a67d676c47 2024-11-23T03:23:34,779 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into ee9ce258cea94fcfb64306a67d676c47(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:34,779 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:34,779 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332214600; duration=0sec 2024-11-23T03:23:34,779 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:34,779 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:34,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332274866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332274866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332274866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332274866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332274866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:35,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332275069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332275069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332275070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332275071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332275070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:35,105 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a2a8d0593d87415a99518d4e4c7e07af_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a2a8d0593d87415a99518d4e4c7e07af_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:35,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/49bdad951e8a497d95b57d42755e5c93, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:35,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/49bdad951e8a497d95b57d42755e5c93 is 175, key is test_row_0/A:col10/1732332213619/Put/seqid=0 2024-11-23T03:23:35,111 INFO 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#573 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:35,111 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee97327f2768478d9f4e1707f96b4446 is 175, key is test_row_0/A:col10/1732332212466/Put/seqid=0 2024-11-23T03:23:35,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742499_1675 (size=31255) 2024-11-23T03:23:35,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742500_1676 (size=31685) 2024-11-23T03:23:35,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332275373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332275374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332275376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332275376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332275376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,554 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/49bdad951e8a497d95b57d42755e5c93 2024-11-23T03:23:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/d126baf85a03421b98b170abced325fd is 50, key is test_row_0/B:col10/1732332213619/Put/seqid=0 2024-11-23T03:23:35,578 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/ee97327f2768478d9f4e1707f96b4446 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446 2024-11-23T03:23:35,584 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into ee97327f2768478d9f4e1707f96b4446(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
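The RegionTooBusyException entries above are the region server rejecting new mutations while the memstore of badf71fee211af0b660972c46cad8684 is over its 512.0 K blocking limit and the in-flight flush has not yet drained it. Below is a minimal client-side sketch of what such a rejected put looks like and one way to back off and retry; the table, family, row, and qualifier names are taken from this log, while the retry count and sleep are illustrative assumptions (the stock HBase client has its own retry settings, so an explicit loop like this is usually not required).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Bounded manual retry with a simple backoff while the flush drains the memstore.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;       // give up after a few attempts
                    Thread.sleep(100L * attempt);    // illustrative backoff, not a recommended value
                }
            }
        }
    }
}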
2024-11-23T03:23:35,584 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684:
2024-11-23T03:23:35,584 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332214600; duration=0sec
2024-11-23T03:23:35,584 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T03:23:35,584 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A
2024-11-23T03:23:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742501_1677 (size=12301)
2024-11-23T03:23:35,592 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/d126baf85a03421b98b170abced325fd
2024-11-23T03:23:35,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/4ffc31911e424abf84de8b2f17654a01 is 50, key is test_row_0/C:col10/1732332213619/Put/seqid=0
2024-11-23T03:23:35,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742502_1678 (size=12301)
2024-11-23T03:23:35,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332275877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332275879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332275880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332275881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332275883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:35,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:36,034 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/4ffc31911e424abf84de8b2f17654a01 2024-11-23T03:23:36,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/49bdad951e8a497d95b57d42755e5c93 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93 2024-11-23T03:23:36,050 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93, entries=150, sequenceid=276, filesize=30.5 K 2024-11-23T03:23:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/d126baf85a03421b98b170abced325fd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd
2024-11-23T03:23:36,056 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd, entries=150, sequenceid=276, filesize=12.0 K
2024-11-23T03:23:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/4ffc31911e424abf84de8b2f17654a01 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01
2024-11-23T03:23:36,064 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01, entries=150, sequenceid=276, filesize=12.0 K
2024-11-23T03:23:36,065 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for badf71fee211af0b660972c46cad8684 in 1462ms, sequenceid=276, compaction requested=false
2024-11-23T03:23:36,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684:
2024-11-23T03:23:36,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.
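The flush that just finished (sequenceid=276, pid=168 on the region server) is one subprocedure of a table-level flush coordinated by the master (pid=167). Requesting such a flush from a client typically goes through the Admin API, as in the hedged sketch below; whether this particular test issued it via Admin.flush or through another path is not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the cluster to flush every region of the table; on the server side this
            // surfaces as flush procedures like the pid=167/pid=168 pair in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}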
2024-11-23T03:23:36,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-23T03:23:36,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-23T03:23:36,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-23T03:23:36,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2270 sec 2024-11-23T03:23:36,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.2320 sec 2024-11-23T03:23:36,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:23:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:36,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123745ed262a0ed46bda57ae597e9bab75f_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:36,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:36,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332276948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:36,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332276949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:36,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332276949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:36,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332276950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:36,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742503_1679 (size=14994) 2024-11-23T03:23:36,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332276955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332277057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332277057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332277058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332277058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332277060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332277260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332277263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332277263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332277263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332277263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,358 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:37,362 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123745ed262a0ed46bda57ae597e9bab75f_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123745ed262a0ed46bda57ae597e9bab75f_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:37,363 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/19a9c370e22c45ccb81659deb7d2bccd, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/19a9c370e22c45ccb81659deb7d2bccd is 175, key is test_row_0/A:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:37,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742504_1680 (size=39949) 2024-11-23T03:23:37,394 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/19a9c370e22c45ccb81659deb7d2bccd 2024-11-23T03:23:37,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6826924e77c1406e93bbaf9406894fab is 50, key is test_row_0/B:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742505_1681 
(size=12301) 2024-11-23T03:23:37,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6826924e77c1406e93bbaf9406894fab 2024-11-23T03:23:37,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e1056ff07f5145e8805fdc50a0b17057 is 50, key is test_row_0/C:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:37,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742506_1682 (size=12301) 2024-11-23T03:23:37,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e1056ff07f5145e8805fdc50a0b17057 2024-11-23T03:23:37,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/19a9c370e22c45ccb81659deb7d2bccd as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd 2024-11-23T03:23:37,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd, entries=200, sequenceid=296, filesize=39.0 K 2024-11-23T03:23:37,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6826924e77c1406e93bbaf9406894fab as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab 2024-11-23T03:23:37,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab, entries=150, sequenceid=296, filesize=12.0 K 2024-11-23T03:23:37,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e1056ff07f5145e8805fdc50a0b17057 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057 2024-11-23T03:23:37,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057, entries=150, sequenceid=296, filesize=12.0 K 2024-11-23T03:23:37,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for badf71fee211af0b660972c46cad8684 in 634ms, sequenceid=296, compaction requested=true 2024-11-23T03:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:37,523 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:37,523 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:37,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:37,524 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:37,524 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:37,524 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:37,524 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:37,524 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:37,524 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:37,524 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/85de7464d0d1499088a9c0154f8cbc20, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.5 K 2024-11-23T03:23:37,524 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=100.5 K 2024-11-23T03:23:37,524 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:37,524 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd] 2024-11-23T03:23:37,525 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 85de7464d0d1499088a9c0154f8cbc20, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:37,525 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee97327f2768478d9f4e1707f96b4446, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:37,525 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d126baf85a03421b98b170abced325fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332213619 2024-11-23T03:23:37,526 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 6826924e77c1406e93bbaf9406894fab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:37,526 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49bdad951e8a497d95b57d42755e5c93, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332213619 2024-11-23T03:23:37,526 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19a9c370e22c45ccb81659deb7d2bccd, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:37,542 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#581 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:37,542 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/46f3ea076d0c4d838fd9616c1a0f472b is 50, key is test_row_0/B:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:37,547 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:37,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:23:37,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:37,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:37,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:37,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742507_1683 (size=12983) 2024-11-23T03:23:37,580 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/46f3ea076d0c4d838fd9616c1a0f472b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/46f3ea076d0c4d838fd9616c1a0f472b 2024-11-23T03:23:37,583 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123e9460cfeb1ae44e1a8dbd632cc25abcf_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,585 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123e9460cfeb1ae44e1a8dbd632cc25abcf_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,585 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e9460cfeb1ae44e1a8dbd632cc25abcf_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,586 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 46f3ea076d0c4d838fd9616c1a0f472b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:37,586 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:37,586 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332217523; duration=0sec 2024-11-23T03:23:37,586 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:37,586 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:37,586 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:37,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332277583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,592 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:37,592 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:37,592 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:37,592 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ee9ce258cea94fcfb64306a67d676c47, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.5 K 2024-11-23T03:23:37,593 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ee9ce258cea94fcfb64306a67d676c47, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732332212466 2024-11-23T03:23:37,593 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ffc31911e424abf84de8b2f17654a01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732332213619 2024-11-23T03:23:37,593 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e1056ff07f5145e8805fdc50a0b17057, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332277585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332277589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332277589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332277590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112357c67fa91e2b40adae1375dc074b8317_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332216951/Put/seqid=0 2024-11-23T03:23:37,628 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#584 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:37,629 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/44564673066d414bb660f1d4ffc5765a is 50, key is test_row_0/C:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:37,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742508_1684 (size=4469) 2024-11-23T03:23:37,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742509_1685 (size=12454) 2024-11-23T03:23:37,656 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:37,660 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112357c67fa91e2b40adae1375dc074b8317_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357c67fa91e2b40adae1375dc074b8317_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:37,661 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/09af06b903ca4e1a85576b4e6e22e285, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/09af06b903ca4e1a85576b4e6e22e285 is 175, key is test_row_0/A:col10/1732332216951/Put/seqid=0 2024-11-23T03:23:37,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742510_1686 (size=12983) 2024-11-23T03:23:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742511_1687 (size=31255) 2024-11-23T03:23:37,676 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/09af06b903ca4e1a85576b4e6e22e285 2024-11-23T03:23:37,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/c753a74d4b4a41e691806a14fe0607be is 50, key is test_row_0/B:col10/1732332216951/Put/seqid=0 2024-11-23T03:23:37,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332277692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332277695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332277696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332277697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332277697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742512_1688 (size=12301) 2024-11-23T03:23:37,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/c753a74d4b4a41e691806a14fe0607be 2024-11-23T03:23:37,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7a40f77322b045118ecdc8f745bb90ac is 50, key is test_row_0/C:col10/1732332216951/Put/seqid=0 2024-11-23T03:23:37,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742513_1689 (size=12301) 2024-11-23T03:23:37,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7a40f77322b045118ecdc8f745bb90ac 2024-11-23T03:23:37,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/09af06b903ca4e1a85576b4e6e22e285 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285 2024-11-23T03:23:37,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285, entries=150, sequenceid=316, filesize=30.5 K 2024-11-23T03:23:37,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/c753a74d4b4a41e691806a14fe0607be as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be 2024-11-23T03:23:37,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be, entries=150, sequenceid=316, filesize=12.0 K 2024-11-23T03:23:37,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7a40f77322b045118ecdc8f745bb90ac as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac 2024-11-23T03:23:37,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac, entries=150, sequenceid=316, filesize=12.0 K 2024-11-23T03:23:37,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for badf71fee211af0b660972c46cad8684 in 220ms, sequenceid=316, compaction requested=false 2024-11-23T03:23:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:37,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:37,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:37,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:37,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112378963d4bbeb44ae89aa3ad5fd093795c_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:37,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332277922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742514_1690 (size=14994) 2024-11-23T03:23:37,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332277923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332277924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,930 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:37,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332277927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:37,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332277927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:37,935 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112378963d4bbeb44ae89aa3ad5fd093795c_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112378963d4bbeb44ae89aa3ad5fd093795c_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:37,936 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8acf3c61f78d427b9f3dbb6148a243f8, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:37,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8acf3c61f78d427b9f3dbb6148a243f8 is 175, key is test_row_0/A:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T03:23:37,949 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-23T03:23:37,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-23T03:23:37,954 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:37,956 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:37,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:37,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742515_1691 (size=39949) 2024-11-23T03:23:37,966 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8acf3c61f78d427b9f3dbb6148a243f8 2024-11-23T03:23:37,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/81b39b7ba63e41b2ba6e7059ca892c83 is 50, key is test_row_0/B:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:38,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742516_1692 (size=12301) 2024-11-23T03:23:38,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/81b39b7ba63e41b2ba6e7059ca892c83 2024-11-23T03:23:38,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/46daac584a054d3eaae74f9e103f05bb is 50, key is test_row_0/C:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:38,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332278030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,033 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#582 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:38,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332278030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332278031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,034 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2a77e033a1f6429897f8a54bd920daa7 is 175, key is test_row_0/A:col10/1732332214755/Put/seqid=0 2024-11-23T03:23:38,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332278034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332278035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742517_1693 (size=12301) 2024-11-23T03:23:38,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742518_1694 (size=31937) 2024-11-23T03:23:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:38,062 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2a77e033a1f6429897f8a54bd920daa7 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7 2024-11-23T03:23:38,071 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 2a77e033a1f6429897f8a54bd920daa7(size=31.2 K), total size for store is 61.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:38,071 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:38,071 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332217523; duration=0sec 2024-11-23T03:23:38,071 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:38,071 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:38,075 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/44564673066d414bb660f1d4ffc5765a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/44564673066d414bb660f1d4ffc5765a 2024-11-23T03:23:38,081 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into 44564673066d414bb660f1d4ffc5765a(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:38,081 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:38,081 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332217523; duration=0sec 2024-11-23T03:23:38,081 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:38,081 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:38,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
as already flushing 2024-11-23T03:23:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332278233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332278235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332278237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332278240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332278241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:38,260 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:38,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T03:23:38,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/46daac584a054d3eaae74f9e103f05bb 2024-11-23T03:23:38,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8acf3c61f78d427b9f3dbb6148a243f8 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8 2024-11-23T03:23:38,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8, entries=200, sequenceid=334, filesize=39.0 K 2024-11-23T03:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/81b39b7ba63e41b2ba6e7059ca892c83 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83 2024-11-23T03:23:38,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83, entries=150, 
sequenceid=334, filesize=12.0 K 2024-11-23T03:23:38,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/46daac584a054d3eaae74f9e103f05bb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb 2024-11-23T03:23:38,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb, entries=150, sequenceid=334, filesize=12.0 K 2024-11-23T03:23:38,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for badf71fee211af0b660972c46cad8684 in 577ms, sequenceid=334, compaction requested=true 2024-11-23T03:23:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:38,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:38,476 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:38,477 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:38,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:38,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:38,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:38,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] 
regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:38,478 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,478 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,478 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=100.7 K 2024-11-23T03:23:38,478 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/46f3ea076d0c4d838fd9616c1a0f472b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.7 K 2024-11-23T03:23:38,478 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8] 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 46f3ea076d0c4d838fd9616c1a0f472b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:38,478 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a77e033a1f6429897f8a54bd920daa7, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:38,479 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting c753a74d4b4a41e691806a14fe0607be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332216942 2024-11-23T03:23:38,479 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09af06b903ca4e1a85576b4e6e22e285, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332216942 2024-11-23T03:23:38,479 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 81b39b7ba63e41b2ba6e7059ca892c83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:38,479 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8acf3c61f78d427b9f3dbb6148a243f8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:38,488 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:38,506 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#591 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:38,506 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9c70e49fb34244c1918cb1b8adc85142 is 50, key is test_row_0/B:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:38,506 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123c2b40ea3cb794e808d07b2df35d9ce3a_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:38,508 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123c2b40ea3cb794e808d07b2df35d9ce3a_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:38,508 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c2b40ea3cb794e808d07b2df35d9ce3a_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:38,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:38,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:38,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742519_1695 (size=13085) 2024-11-23T03:23:38,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742520_1696 (size=4469) 2024-11-23T03:23:38,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112397589289845d497c9d9ffc09c6da3374_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332217925/Put/seqid=0 2024-11-23T03:23:38,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:38,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332278572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332278573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332278573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332278574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742521_1697 (size=14994) 2024-11-23T03:23:38,581 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:38,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332278578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,585 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112397589289845d497c9d9ffc09c6da3374_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112397589289845d497c9d9ffc09c6da3374_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:38,586 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1bd980e072554df48d2ee09f4ac20016, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:38,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1bd980e072554df48d2ee09f4ac20016 is 175, key is test_row_0/A:col10/1732332217925/Put/seqid=0 2024-11-23T03:23:38,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742522_1698 (size=39949) 2024-11-23T03:23:38,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332278679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332278679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332278679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332278679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332278683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:38,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332278883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332278883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332278883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332278884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:38,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332278888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:38,966 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#590 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:38,967 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/d9ecdaafaecd41ba93faa5ce8a48138c is 175, key is test_row_0/A:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:38,969 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9c70e49fb34244c1918cb1b8adc85142 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9c70e49fb34244c1918cb1b8adc85142 2024-11-23T03:23:38,975 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 9c70e49fb34244c1918cb1b8adc85142(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
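Note: the repeated RegionTooBusyException warnings above are the region server rejecting client mutations while the badf71fee211af0b660972c46cad8684 memstore is over the 512.0 K limit reported in the log and a flush is still in flight. The following is a minimal client-side sketch of how a writer could back off and retry such rejections; it assumes the exception reaches the caller directly (the stock HBase client normally retries internally and may instead surface a retries-exhausted exception), and the retry counts and sleep values are illustrative only. The table, row, family and qualifier names are taken from the log entries above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  // Retry a single put with exponential backoff while the region reports it is too busy
  // (memstore over limit, flush still running), as in the warnings logged above.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;                        // initial backoff (illustrative value)
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;                                // write accepted
      } catch (RegionTooBusyException e) {     // region rejected the mutation
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 5000); // cap the backoff at 5 seconds
      }
    }
    throw new IOException("put still rejected after retries");
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }
}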
2024-11-23T03:23:38,975 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:38,975 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332218476; duration=0sec 2024-11-23T03:23:38,976 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:38,976 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:38,976 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:38,977 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:38,977 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:38,978 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:38,978 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/44564673066d414bb660f1d4ffc5765a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.7 K 2024-11-23T03:23:38,978 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 44564673066d414bb660f1d4ffc5765a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732332214755 2024-11-23T03:23:38,979 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a40f77322b045118ecdc8f745bb90ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732332216942 2024-11-23T03:23:38,979 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 46daac584a054d3eaae74f9e103f05bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:39,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 
is added to blk_1073742523_1699 (size=32039) 2024-11-23T03:23:39,014 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#593 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:39,014 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/31c28926d19947c098364b17837cefd4 is 50, key is test_row_0/C:col10/1732332217588/Put/seqid=0 2024-11-23T03:23:39,019 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1bd980e072554df48d2ee09f4ac20016 2024-11-23T03:23:39,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
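Note: the "Exploring compaction algorithm has selected 3 files of size 37585" entry above comes from ExploringCompactionPolicy picking a set of store files whose sizes satisfy the configured size ratio. The sketch below is a simplified illustration of that ratio test, not the actual HBase implementation; the 1.2 ratio is assumed here as the usual default for hbase.hstore.compaction.ratio, and the byte sizes are approximations chosen to sum to the 37585 reported in the log.

import java.util.List;

public class CompactionRatioSketch {
  // Simplified size-ratio test over a candidate file set: every file must be no larger
  // than (sum of the other files) * ratio for the set to be considered compactable.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three C-family files above (12.7 K, 12.0 K, 12.0 K), summing to 37585 bytes.
    List<Long> sizes = List.of(13005L, 12290L, 12290L);
    System.out.println(filesInRatio(sizes, 1.2)); // true: the set qualifies for minor compaction
  }
}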
2024-11-23T03:23:39,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742524_1700 (size=13085) 2024-11-23T03:23:39,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c is 50, key is test_row_0/B:col10/1732332217925/Put/seqid=0 2024-11-23T03:23:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:39,062 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/31c28926d19947c098364b17837cefd4 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/31c28926d19947c098364b17837cefd4 2024-11-23T03:23:39,066 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into 31c28926d19947c098364b17837cefd4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:39,066 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:39,066 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332218477; duration=0sec 2024-11-23T03:23:39,067 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:39,067 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:39,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742525_1701 (size=12301) 2024-11-23T03:23:39,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c 2024-11-23T03:23:39,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/479cbd251ab2485a93451d52fe7c4546 is 50, key is test_row_0/C:col10/1732332217925/Put/seqid=0 2024-11-23T03:23:39,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742526_1702 (size=12301) 2024-11-23T03:23:39,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/479cbd251ab2485a93451d52fe7c4546 2024-11-23T03:23:39,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1bd980e072554df48d2ee09f4ac20016 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016 2024-11-23T03:23:39,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016, entries=200, sequenceid=356, filesize=39.0 K 2024-11-23T03:23:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c as 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c 2024-11-23T03:23:39,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:23:39,183 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:39,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/479cbd251ab2485a93451d52fe7c4546 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546 2024-11-23T03:23:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332279187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332279188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332279189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546, entries=150, sequenceid=356, filesize=12.0 K 2024-11-23T03:23:39,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332279189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for badf71fee211af0b660972c46cad8684 in 655ms, sequenceid=356, compaction requested=false 2024-11-23T03:23:39,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:39,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:39,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:23:39,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:39,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:39,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:39,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233a0cdb9a438f499395932d2ec5c26cca_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332279271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742527_1703 (size=12454) 2024-11-23T03:23:39,337 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:39,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332279375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,407 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/d9ecdaafaecd41ba93faa5ce8a48138c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c 2024-11-23T03:23:39,412 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into d9ecdaafaecd41ba93faa5ce8a48138c(size=31.3 K), total size for store is 70.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:39,412 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:39,412 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332218476; duration=0sec 2024-11-23T03:23:39,412 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:39,412 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:39,490 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:39,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332279578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,652 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:39,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,675 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:39,679 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233a0cdb9a438f499395932d2ec5c26cca_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233a0cdb9a438f499395932d2ec5c26cca_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:39,680 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/991cd09c91904decac89bea7a684c01b, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:39,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/991cd09c91904decac89bea7a684c01b is 175, key is test_row_0/A:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332279693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332279694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332279695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332279700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742528_1704 (size=31255) 2024-11-23T03:23:39,725 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/991cd09c91904decac89bea7a684c01b 2024-11-23T03:23:39,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/94f681a1af89458286fa083dd8df0660 is 50, key is test_row_0/B:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742529_1705 (size=12301) 2024-11-23T03:23:39,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/94f681a1af89458286fa083dd8df0660 2024-11-23T03:23:39,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7dc8ac2e47e94fe6b134d82bffeb9bab is 50, key is test_row_0/C:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742530_1706 (size=12301) 2024-11-23T03:23:39,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7dc8ac2e47e94fe6b134d82bffeb9bab 2024-11-23T03:23:39,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/991cd09c91904decac89bea7a684c01b as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b 2024-11-23T03:23:39,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b, entries=150, sequenceid=375, filesize=30.5 K 2024-11-23T03:23:39,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/94f681a1af89458286fa083dd8df0660 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660 2024-11-23T03:23:39,805 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:39,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660, entries=150, sequenceid=375, filesize=12.0 K 2024-11-23T03:23:39,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/7dc8ac2e47e94fe6b134d82bffeb9bab as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab 2024-11-23T03:23:39,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab, entries=150, sequenceid=375, filesize=12.0 K 2024-11-23T03:23:39,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for badf71fee211af0b660972c46cad8684 in 618ms, sequenceid=375, compaction requested=true 2024-11-23T03:23:39,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:39,814 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:39,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:39,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:39,815 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:39,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:39,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:39,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:39,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:39,818 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:39,818 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:39,818 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,818 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=100.8 K 2024-11-23T03:23:39,818 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,818 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b] 2024-11-23T03:23:39,818 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:39,818 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:39,818 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:39,818 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9c70e49fb34244c1918cb1b8adc85142, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.8 K 2024-11-23T03:23:39,819 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c70e49fb34244c1918cb1b8adc85142, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:39,819 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9ecdaafaecd41ba93faa5ce8a48138c, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:39,820 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bd0a1d619ed465bb3bdd9ceb4cc7e2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332217923 2024-11-23T03:23:39,820 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bd980e072554df48d2ee09f4ac20016, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332217920 2024-11-23T03:23:39,821 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 94f681a1af89458286fa083dd8df0660, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:39,821 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 991cd09c91904decac89bea7a684c01b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:39,829 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#599 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:39,830 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/7847422446854b79b86f5e261c40ca75 is 50, key is test_row_0/B:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,836 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:39,844 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411233cfddf6bf48542ab986461d995a67596_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:39,847 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411233cfddf6bf48542ab986461d995a67596_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:39,847 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233cfddf6bf48542ab986461d995a67596_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:39,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742531_1707 (size=13187) 2024-11-23T03:23:39,865 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/7847422446854b79b86f5e261c40ca75 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/7847422446854b79b86f5e261c40ca75 2024-11-23T03:23:39,870 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 7847422446854b79b86f5e261c40ca75(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:39,870 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:39,871 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332219815; duration=0sec 2024-11-23T03:23:39,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:39,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:39,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:39,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:39,871 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:39,872 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,872 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/31c28926d19947c098364b17837cefd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.8 K 2024-11-23T03:23:39,872 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 31c28926d19947c098364b17837cefd4, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732332217581 2024-11-23T03:23:39,873 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 479cbd251ab2485a93451d52fe7c4546, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732332217923 2024-11-23T03:23:39,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742532_1708 (size=4469) 2024-11-23T03:23:39,874 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dc8ac2e47e94fe6b134d82bffeb9bab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:39,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:23:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:39,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:39,887 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#601 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:39,887 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d18b6c344c4945aaa2f602e0b91127e1 is 50, key is test_row_0/C:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:39,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230657fc0e515b4d31b0706f5491e1b9d7_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332219883/Put/seqid=0 2024-11-23T03:23:39,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332279935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742533_1709 (size=13187) 2024-11-23T03:23:39,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742534_1710 (size=14994) 2024-11-23T03:23:39,957 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:39,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:39,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:39,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332280041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:40,110 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:40,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:40,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332280246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:40,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,276 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#600 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:40,276 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/90ad14aab6d2475f962ccd3ca9d532b0 is 175, key is test_row_0/A:col10/1732332219195/Put/seqid=0 2024-11-23T03:23:40,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742535_1711 (size=32141) 2024-11-23T03:23:40,290 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/90ad14aab6d2475f962ccd3ca9d532b0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0 2024-11-23T03:23:40,296 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 90ad14aab6d2475f962ccd3ca9d532b0(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:40,296 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:40,296 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332219814; duration=0sec 2024-11-23T03:23:40,297 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:40,297 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:40,348 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d18b6c344c4945aaa2f602e0b91127e1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d18b6c344c4945aaa2f602e0b91127e1 2024-11-23T03:23:40,354 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into d18b6c344c4945aaa2f602e0b91127e1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:40,355 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:40,355 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332219815; duration=0sec 2024-11-23T03:23:40,355 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:40,355 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:40,363 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:40,367 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230657fc0e515b4d31b0706f5491e1b9d7_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230657fc0e515b4d31b0706f5491e1b9d7_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:40,370 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1b76d51d719745179516b504031162dc, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:40,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1b76d51d719745179516b504031162dc is 175, key is test_row_0/A:col10/1732332219883/Put/seqid=0 2024-11-23T03:23:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742536_1712 (size=39949) 2024-11-23T03:23:40,411 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1b76d51d719745179516b504031162dc 2024-11-23T03:23:40,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:40,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:40,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/ed7d9d88e7c34f51961204586cd8c745 is 50, key is test_row_0/B:col10/1732332219883/Put/seqid=0 2024-11-23T03:23:40,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742537_1713 (size=12301) 2024-11-23T03:23:40,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/ed7d9d88e7c34f51961204586cd8c745 2024-11-23T03:23:40,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e09a32b77209405da9ba216afef0ae2e is 50, key is test_row_0/C:col10/1732332219883/Put/seqid=0 2024-11-23T03:23:40,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742538_1714 (size=12301) 2024-11-23T03:23:40,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e09a32b77209405da9ba216afef0ae2e 2024-11-23T03:23:40,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/1b76d51d719745179516b504031162dc as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc 2024-11-23T03:23:40,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc, entries=200, sequenceid=397, filesize=39.0 K 2024-11-23T03:23:40,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/ed7d9d88e7c34f51961204586cd8c745 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745 2024-11-23T03:23:40,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745, entries=150, sequenceid=397, filesize=12.0 K 2024-11-23T03:23:40,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/e09a32b77209405da9ba216afef0ae2e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e 2024-11-23T03:23:40,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e, entries=150, sequenceid=397, filesize=12.0 K 2024-11-23T03:23:40,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for badf71fee211af0b660972c46cad8684 in 665ms, sequenceid=397, compaction requested=false 2024-11-23T03:23:40,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:40,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:40,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:40,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
as already flushing 2024-11-23T03:23:40,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dfbd2af42ef543b18716c7500e72d8e5_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:40,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742539_1715 (size=14994) 2024-11-23T03:23:40,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332280614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332280698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332280706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332280706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332280707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332280718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:40,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:40,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,876 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:40,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:40,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:40,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:40,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:40,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332280921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,010 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:41,014 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dfbd2af42ef543b18716c7500e72d8e5_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dfbd2af42ef543b18716c7500e72d8e5_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:41,016 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8ef237dc4b4c430ca955b8891c040464, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8ef237dc4b4c430ca955b8891c040464 is 175, key is test_row_0/A:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:41,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:41,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
as already flushing 2024-11-23T03:23:41,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:41,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:41,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:41,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:41,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742540_1716 (size=39949) 2024-11-23T03:23:41,059 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8ef237dc4b4c430ca955b8891c040464 2024-11-23T03:23:41,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/40002de8b09442f384b3b25fe43b2acf is 50, key is test_row_0/B:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742541_1717 (size=12301) 2024-11-23T03:23:41,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/40002de8b09442f384b3b25fe43b2acf 2024-11-23T03:23:41,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/642bed18753f4ca29ee692f6973da992 is 50, key is test_row_0/C:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,110 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742542_1718 (size=12301) 2024-11-23T03:23:41,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/642bed18753f4ca29ee692f6973da992 2024-11-23T03:23:41,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/8ef237dc4b4c430ca955b8891c040464 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464 2024-11-23T03:23:41,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464, entries=200, sequenceid=416, filesize=39.0 K 2024-11-23T03:23:41,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/40002de8b09442f384b3b25fe43b2acf as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf 2024-11-23T03:23:41,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf, entries=150, sequenceid=416, filesize=12.0 K 2024-11-23T03:23:41,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/642bed18753f4ca29ee692f6973da992 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992 2024-11-23T03:23:41,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992, entries=150, sequenceid=416, filesize=12.0 K 2024-11-23T03:23:41,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for badf71fee211af0b660972c46cad8684 in 571ms, sequenceid=416, compaction requested=true 2024-11-23T03:23:41,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:41,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:41,124 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:41,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:41,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:41,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:41,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:41,124 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:41,124 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:41,124 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:41,125 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/7847422446854b79b86f5e261c40ca75, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.9 K 2024-11-23T03:23:41,125 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=109.4 K 2024-11-23T03:23:41,125 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:41,125 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464] 2024-11-23T03:23:41,125 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90ad14aab6d2475f962ccd3ca9d532b0, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:41,125 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 7847422446854b79b86f5e261c40ca75, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:41,126 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b76d51d719745179516b504031162dc, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732332219261 2024-11-23T03:23:41,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting ed7d9d88e7c34f51961204586cd8c745, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732332219261 2024-11-23T03:23:41,126 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ef237dc4b4c430ca955b8891c040464, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:41,126 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 40002de8b09442f384b3b25fe43b2acf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:41,131 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,132 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#608 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:41,132 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8160ba318d1c4235bfce7d3b17fc28b9 is 50, key is test_row_0/B:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,140 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411238a3ba99213bd4975a39e3ce0eea24378_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,142 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411238a3ba99213bd4975a39e3ce0eea24378_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,142 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238a3ba99213bd4975a39e3ce0eea24378_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742543_1719 (size=13289) 2024-11-23T03:23:41,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742544_1720 (size=4469) 2024-11-23T03:23:41,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:41,183 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:41,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:41,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238c8c63a1249c43d3830b92399927970d_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332220613/Put/seqid=0 2024-11-23T03:23:41,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742545_1721 (size=12454) 2024-11-23T03:23:41,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:41,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:41,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332281261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332281364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,548 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/8160ba318d1c4235bfce7d3b17fc28b9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8160ba318d1c4235bfce7d3b17fc28b9 2024-11-23T03:23:41,552 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into 8160ba318d1c4235bfce7d3b17fc28b9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:41,552 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:41,552 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=13, startTime=1732332221124; duration=0sec 2024-11-23T03:23:41,552 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:41,552 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:41,552 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T03:23:41,553 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T03:23:41,553 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:41,553 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:41,553 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d18b6c344c4945aaa2f602e0b91127e1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=36.9 K 2024-11-23T03:23:41,554 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting d18b6c344c4945aaa2f602e0b91127e1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732332218571 2024-11-23T03:23:41,554 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e09a32b77209405da9ba216afef0ae2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732332219261 2024-11-23T03:23:41,554 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 642bed18753f4ca29ee692f6973da992, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:41,557 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
badf71fee211af0b660972c46cad8684#A#compaction#609 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:41,558 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/77b236a44f5d4042b1607418bcb92907 is 175, key is test_row_0/A:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,561 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#611 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:41,562 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d7ec503df18c4bfdb9801c3d4af8e688 is 50, key is test_row_0/C:col10/1732332220550/Put/seqid=0 2024-11-23T03:23:41,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742546_1722 (size=32243) 2024-11-23T03:23:41,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:41,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332281568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742547_1723 (size=13289) 2024-11-23T03:23:41,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:41,597 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238c8c63a1249c43d3830b92399927970d_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238c8c63a1249c43d3830b92399927970d_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:41,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/62076761393343f399b208df9b7647e2, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:41,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/62076761393343f399b208df9b7647e2 is 175, key is test_row_0/A:col10/1732332220613/Put/seqid=0 2024-11-23T03:23:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742548_1724 (size=31255) 2024-11-23T03:23:41,603 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/62076761393343f399b208df9b7647e2 
2024-11-23T03:23:41,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e4c360d513584abf8b311ed9955a994a is 50, key is test_row_0/B:col10/1732332220613/Put/seqid=0 2024-11-23T03:23:41,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742549_1725 (size=12301) 2024-11-23T03:23:41,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:41,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332281872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:41,974 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/77b236a44f5d4042b1607418bcb92907 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907 2024-11-23T03:23:41,975 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/d7ec503df18c4bfdb9801c3d4af8e688 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d7ec503df18c4bfdb9801c3d4af8e688 2024-11-23T03:23:41,978 INFO 
[RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into 77b236a44f5d4042b1607418bcb92907(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:41,979 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=13, startTime=1732332221123; duration=0sec 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:41,979 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into d7ec503df18c4bfdb9801c3d4af8e688(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:41,979 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=13, startTime=1732332221124; duration=0sec 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:41,979 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:42,022 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e4c360d513584abf8b311ed9955a994a 2024-11-23T03:23:42,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/c47cc9faf71d419f92852fefc345ac9d is 50, key is test_row_0/C:col10/1732332220613/Put/seqid=0 2024-11-23T03:23:42,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742550_1726 (size=12301) 2024-11-23T03:23:42,032 INFO 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/c47cc9faf71d419f92852fefc345ac9d 2024-11-23T03:23:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/62076761393343f399b208df9b7647e2 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2 2024-11-23T03:23:42,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2, entries=150, sequenceid=435, filesize=30.5 K 2024-11-23T03:23:42,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e4c360d513584abf8b311ed9955a994a as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a 2024-11-23T03:23:42,043 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a, entries=150, sequenceid=435, filesize=12.0 K 2024-11-23T03:23:42,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/c47cc9faf71d419f92852fefc345ac9d as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d 2024-11-23T03:23:42,047 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d, entries=150, sequenceid=435, filesize=12.0 K 2024-11-23T03:23:42,048 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for badf71fee211af0b660972c46cad8684 in 865ms, sequenceid=435, compaction 
requested=false 2024-11-23T03:23:42,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:42,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:42,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-23T03:23:42,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-23T03:23:42,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T03:23:42,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0930 sec 2024-11-23T03:23:42,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 4.0980 sec 2024-11-23T03:23:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T03:23:42,064 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-23T03:23:42,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-23T03:23:42,066 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T03:23:42,066 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:42,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:42,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T03:23:42,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-23T03:23:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:42,218 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T03:23:42,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:42,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:42,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231bbb621139c64c62a831d38723f7522b_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332221260/Put/seqid=0 2024-11-23T03:23:42,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742551_1727 (size=12454) 2024-11-23T03:23:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T03:23:42,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:42,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:42,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332282419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332282523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:42,634 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231bbb621139c64c62a831d38723f7522b_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231bbb621139c64c62a831d38723f7522b_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/e7cbf96082bb490eb5090d98d0e6e42f, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/e7cbf96082bb490eb5090d98d0e6e42f is 175, key is test_row_0/A:col10/1732332221260/Put/seqid=0 2024-11-23T03:23:42,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742552_1728 (size=31255) 2024-11-23T03:23:42,665 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=455, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/e7cbf96082bb490eb5090d98d0e6e42f 2024-11-23T03:23:42,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 
2024-11-23T03:23:42,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/821f25ab4b224c4da8165d1e36600a42 is 50, key is test_row_0/B:col10/1732332221260/Put/seqid=0 2024-11-23T03:23:42,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742553_1729 (size=12301) 2024-11-23T03:23:42,711 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/821f25ab4b224c4da8165d1e36600a42 2024-11-23T03:23:42,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33438 deadline: 1732332282715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,719 DEBUG [Thread-2650 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:42,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/603850ab928d43f086af77be66f3bdff is 50, key is test_row_0/C:col10/1732332221260/Put/seqid=0 2024-11-23T03:23:42,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33494 deadline: 1732332282719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33418 deadline: 1732332282720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,722 DEBUG [Thread-2656 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:42,722 DEBUG [Thread-2654 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:42,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33458 deadline: 1732332282724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,726 DEBUG [Thread-2658 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., hostname=0d51875c74df,34141,1732332039937, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T03:23:42,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742554_1730 (size=12301) 2024-11-23T03:23:42,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332282728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:42,730 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/603850ab928d43f086af77be66f3bdff 2024-11-23T03:23:42,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/e7cbf96082bb490eb5090d98d0e6e42f as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f 2024-11-23T03:23:42,742 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f, entries=150, sequenceid=455, filesize=30.5 K 2024-11-23T03:23:42,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/821f25ab4b224c4da8165d1e36600a42 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42 2024-11-23T03:23:42,747 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42, entries=150, sequenceid=455, filesize=12.0 K 2024-11-23T03:23:42,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/603850ab928d43f086af77be66f3bdff as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff 2024-11-23T03:23:42,752 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff, entries=150, sequenceid=455, filesize=12.0 K 2024-11-23T03:23:42,754 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for badf71fee211af0b660972c46cad8684 in 536ms, sequenceid=455, compaction requested=true 2024-11-23T03:23:42,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:42,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
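The RegionTooBusyException entries above are HRegion.checkResources rejecting puts while the region's memstore sits over its blocking limit; in broad terms that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the 512.0 K figure here presumably reflects this test's scaled-down settings. On the client side, RpcRetryingCallerImpl keeps retrying each put (tries=6, retries=16 in the entries above) until the flush that just completed at sequenceid=455 frees space. As a rough, self-contained illustration of that client-side behaviour only, with retry values and cell value that are hypothetical and not taken from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry knobs; RegionTooBusyException is retriable, so the
        // blocking Table.put() below is retried by the client until these run out.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L);               // base pause (ms) between retries
        conf.setLong("hbase.client.operation.timeout", 60000L); // overall per-operation budget (ms)

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Hypothetical cell value, for illustration only.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // may block across several retries while the region reports "too busy"
        }
      }
    }

A plain Table.put() like this blocks across the retries and either succeeds once the region accepts writes again or typically fails with a RetriesExhaustedException wrapping the RegionTooBusyException, as the retry counters in the entries above suggest.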
2024-11-23T03:23:42,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-23T03:23:42,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-23T03:23:42,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-23T03:23:42,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 689 msec 2024-11-23T03:23:42,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 692 msec 2024-11-23T03:23:43,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:43,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:43,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123203bf435e69c48648ea964f404cd4342_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:43,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742555_1731 (size=12454) 2024-11-23T03:23:43,047 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:43,050 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123203bf435e69c48648ea964f404cd4342_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123203bf435e69c48648ea964f404cd4342_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:43,051 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b22a498b8de74e0cb8f9c7da78c82bb9, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:43,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b22a498b8de74e0cb8f9c7da78c82bb9 is 175, key is test_row_0/A:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:43,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742556_1732 (size=31255) 2024-11-23T03:23:43,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332283077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T03:23:43,169 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-23T03:23:43,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T03:23:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-23T03:23:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:43,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T03:23:43,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T03:23:43,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T03:23:43,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:43,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332283182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:43,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
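The pid=174 entries above show the next flush request arriving while MemStoreFlusher.0 is still writing out the previous one: FlushRegionCallable logs "NOT flushing ... as already flushing" and raises the java.io.IOException "Unable to complete flush", which the entries that follow report back to the master as a failed remote procedure. Requests like these come from the synchronous admin flush API; a minimal, hypothetical client sketch (not this test's code) of the FLUSH operation recorded under pid=171 and pid=173:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous table flush; in this build the master runs it as a
          // FlushTableProcedure with one FlushRegionProcedure per region,
          // matching the pid=171/172 and pid=173/174 entries in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }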
2024-11-23T03:23:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332283384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,456 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=475, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b22a498b8de74e0cb8f9c7da78c82bb9 2024-11-23T03:23:43,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/27cb25536e6f4c9b82047f5543f8ec6e is 50, key is test_row_0/B:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:43,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742557_1733 (size=12301) 2024-11-23T03:23:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:43,476 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=174 2024-11-23T03:23:43,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:43,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
as already flushing 2024-11-23T03:23:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:43,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332283688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:43,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:43,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:43,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:43,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/27cb25536e6f4c9b82047f5543f8ec6e 2024-11-23T03:23:43,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/beee334610f44eaba317051388dd3764 is 50, key is test_row_0/C:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:43,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742558_1734 (size=12301) 2024-11-23T03:23:43,934 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:43,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:43,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:43,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:43,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:43,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:43,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:44,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T03:23:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33454 deadline: 1732332284194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 2024-11-23T03:23:44,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:44,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:44,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:44,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T03:23:44,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T03:23:44,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:44,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/beee334610f44eaba317051388dd3764 2024-11-23T03:23:44,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b22a498b8de74e0cb8f9c7da78c82bb9 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9 2024-11-23T03:23:44,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9, entries=150, sequenceid=475, filesize=30.5 K 2024-11-23T03:23:44,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/27cb25536e6f4c9b82047f5543f8ec6e as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e 2024-11-23T03:23:44,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e, entries=150, sequenceid=475, filesize=12.0 K 2024-11-23T03:23:44,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/beee334610f44eaba317051388dd3764 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764 2024-11-23T03:23:44,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764, entries=150, sequenceid=475, filesize=12.0 K 2024-11-23T03:23:44,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for badf71fee211af0b660972c46cad8684 in 1256ms, sequenceid=475, compaction requested=true 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:44,291 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store badf71fee211af0b660972c46cad8684:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T03:23:44,291 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:44,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:44,292 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:44,292 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 126008 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:44,292 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/B is initiating minor compaction (all files) 2024-11-23T03:23:44,293 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/B in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/A is initiating minor compaction (all files) 2024-11-23T03:23:44,293 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8160ba318d1c4235bfce7d3b17fc28b9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=49.0 K 2024-11-23T03:23:44,293 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/A in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,293 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=123.1 K 2024-11-23T03:23:44,293 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
files: [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9] 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 8160ba318d1c4235bfce7d3b17fc28b9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77b236a44f5d4042b1607418bcb92907, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting e4c360d513584abf8b311ed9955a994a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332220599 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62076761393343f399b208df9b7647e2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332220599 2024-11-23T03:23:44,293 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 821f25ab4b224c4da8165d1e36600a42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732332221244 2024-11-23T03:23:44,294 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7cbf96082bb490eb5090d98d0e6e42f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732332221244 2024-11-23T03:23:44,294 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] compactions.Compactor(224): Compacting 27cb25536e6f4c9b82047f5543f8ec6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732332222414 2024-11-23T03:23:44,294 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting b22a498b8de74e0cb8f9c7da78c82bb9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732332222414 2024-11-23T03:23:44,299 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:44,300 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#B#compaction#620 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:44,301 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123703a5b73a9774ae78fc4b2c7c64f01a4_badf71fee211af0b660972c46cad8684 store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:44,301 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e40ac20544ef4ddab55c1144658df896 is 50, key is test_row_0/B:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:44,303 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123703a5b73a9774ae78fc4b2c7c64f01a4_badf71fee211af0b660972c46cad8684, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:44,303 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123703a5b73a9774ae78fc4b2c7c64f01a4_badf71fee211af0b660972c46cad8684 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742559_1735 (size=13425) 2024-11-23T03:23:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742560_1736 (size=4469) 2024-11-23T03:23:44,319 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#A#compaction#621 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:44,320 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b3b861387b5040f19899b5e839048713 is 175, key is test_row_0/A:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:44,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742561_1737 (size=32379) 2024-11-23T03:23:44,326 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/b3b861387b5040f19899b5e839048713 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b3b861387b5040f19899b5e839048713 2024-11-23T03:23:44,330 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/A of badf71fee211af0b660972c46cad8684 into b3b861387b5040f19899b5e839048713(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:44,330 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:44,330 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/A, priority=12, startTime=1732332224291; duration=0sec 2024-11-23T03:23:44,330 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T03:23:44,330 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:A 2024-11-23T03:23:44,330 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T03:23:44,331 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T03:23:44,331 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1540): badf71fee211af0b660972c46cad8684/C is initiating minor compaction (all files) 2024-11-23T03:23:44,331 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of badf71fee211af0b660972c46cad8684/C in TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:44,331 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d7ec503df18c4bfdb9801c3d4af8e688, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764] into tmpdir=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp, totalSize=49.0 K 2024-11-23T03:23:44,331 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7ec503df18c4bfdb9801c3d4af8e688, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732332219926 2024-11-23T03:23:44,332 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting c47cc9faf71d419f92852fefc345ac9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732332220599 2024-11-23T03:23:44,332 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting 603850ab928d43f086af77be66f3bdff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732332221244 2024-11-23T03:23:44,332 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] compactions.Compactor(224): Compacting beee334610f44eaba317051388dd3764, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732332222414 2024-11-23T03:23:44,340 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): badf71fee211af0b660972c46cad8684#C#compaction#622 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T03:23:44,340 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/eabd099f59734f65aef4c9a19fb5dc11 is 50, key is test_row_0/C:col10/1732332222418/Put/seqid=0 2024-11-23T03:23:44,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742562_1738 (size=13425) 2024-11-23T03:23:44,392 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:44,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34141 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T03:23:44,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:44,392 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:44,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:44,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112305741a3844dc4565b7a2e42c4e350f91_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332223073/Put/seqid=0 2024-11-23T03:23:44,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742563_1739 (size=12454) 2024-11-23T03:23:44,407 DEBUG [Thread-2667 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:61411 
2024-11-23T03:23:44,407 DEBUG [Thread-2667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:44,407 DEBUG [Thread-2661 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:61411 2024-11-23T03:23:44,408 DEBUG [Thread-2661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:44,408 DEBUG [Thread-2665 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:61411 2024-11-23T03:23:44,408 DEBUG [Thread-2665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:44,409 DEBUG [Thread-2663 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:61411 2024-11-23T03:23:44,409 DEBUG [Thread-2669 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:61411 2024-11-23T03:23:44,409 DEBUG [Thread-2663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:44,409 DEBUG [Thread-2669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:44,722 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/e40ac20544ef4ddab55c1144658df896 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e40ac20544ef4ddab55c1144658df896 2024-11-23T03:23:44,725 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/B of badf71fee211af0b660972c46cad8684 into e40ac20544ef4ddab55c1144658df896(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T03:23:44,725 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:44,725 INFO [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/B, priority=12, startTime=1732332224291; duration=0sec 2024-11-23T03:23:44,725 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:44,725 DEBUG [RS:0;0d51875c74df:34141-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:B 2024-11-23T03:23:44,752 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/eabd099f59734f65aef4c9a19fb5dc11 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/eabd099f59734f65aef4c9a19fb5dc11 2024-11-23T03:23:44,755 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in badf71fee211af0b660972c46cad8684/C of badf71fee211af0b660972c46cad8684 into eabd099f59734f65aef4c9a19fb5dc11(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T03:23:44,755 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:44,755 INFO [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684., storeName=badf71fee211af0b660972c46cad8684/C, priority=12, startTime=1732332224291; duration=0sec 2024-11-23T03:23:44,755 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T03:23:44,755 DEBUG [RS:0;0d51875c74df:34141-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: badf71fee211af0b660972c46cad8684:C 2024-11-23T03:23:44,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:44,805 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112305741a3844dc4565b7a2e42c4e350f91_badf71fee211af0b660972c46cad8684 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112305741a3844dc4565b7a2e42c4e350f91_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:44,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2bd276cf4a3f47228c735b0b5f0fb490, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:44,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2bd276cf4a3f47228c735b0b5f0fb490 is 175, key is test_row_0/A:col10/1732332223073/Put/seqid=0 2024-11-23T03:23:44,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742564_1740 (size=31255) 2024-11-23T03:23:45,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34141 {}] regionserver.HRegion(8581): Flush requested on badf71fee211af0b660972c46cad8684 2024-11-23T03:23:45,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. as already flushing 2024-11-23T03:23:45,207 DEBUG [Thread-2652 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:61411 2024-11-23T03:23:45,207 DEBUG [Thread-2652 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:45,209 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=492, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2bd276cf4a3f47228c735b0b5f0fb490 2024-11-23T03:23:45,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/76b3cd9783004900b39c1f7e381747f6 is 50, key is test_row_0/B:col10/1732332223073/Put/seqid=0 2024-11-23T03:23:45,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742565_1741 (size=12301) 2024-11-23T03:23:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:45,617 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/76b3cd9783004900b39c1f7e381747f6 2024-11-23T03:23:45,622 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/9bca2b8dc9f94849a18b74b225199d74 is 50, key is test_row_0/C:col10/1732332223073/Put/seqid=0 2024-11-23T03:23:45,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742566_1742 (size=12301) 2024-11-23T03:23:46,026 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/9bca2b8dc9f94849a18b74b225199d74 2024-11-23T03:23:46,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/2bd276cf4a3f47228c735b0b5f0fb490 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2bd276cf4a3f47228c735b0b5f0fb490 2024-11-23T03:23:46,032 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2bd276cf4a3f47228c735b0b5f0fb490, entries=150, sequenceid=492, filesize=30.5 K 2024-11-23T03:23:46,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/76b3cd9783004900b39c1f7e381747f6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/76b3cd9783004900b39c1f7e381747f6 2024-11-23T03:23:46,036 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/76b3cd9783004900b39c1f7e381747f6, entries=150, sequenceid=492, filesize=12.0 K 2024-11-23T03:23:46,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/9bca2b8dc9f94849a18b74b225199d74 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/9bca2b8dc9f94849a18b74b225199d74 2024-11-23T03:23:46,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/9bca2b8dc9f94849a18b74b225199d74, entries=150, sequenceid=492, filesize=12.0 K 2024-11-23T03:23:46,040 INFO [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=6.71 KB/6870 for badf71fee211af0b660972c46cad8684 in 1647ms, sequenceid=492, compaction requested=false 2024-11-23T03:23:46,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:46,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:46,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d51875c74df:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-23T03:23:46,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-23T03:23:46,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-23T03:23:46,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8680 sec 2024-11-23T03:23:46,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.8710 sec 2024-11-23T03:23:46,736 DEBUG [Thread-2658 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:61411 2024-11-23T03:23:46,736 DEBUG [Thread-2658 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:46,736 DEBUG [Thread-2656 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:61411 2024-11-23T03:23:46,736 DEBUG [Thread-2656 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:46,745 DEBUG [Thread-2650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fb684eb to 127.0.0.1:61411 2024-11-23T03:23:46,745 DEBUG [Thread-2650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:46,755 DEBUG [Thread-2654 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:61411 2024-11-23T03:23:46,755 DEBUG [Thread-2654 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:47,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T03:23:47,276 INFO [Thread-2660 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 148 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4766 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4652 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4647 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4762 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4647 2024-11-23T03:23:47,276 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T03:23:47,276 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:23:47,276 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cbfd84f to 127.0.0.1:61411 2024-11-23T03:23:47,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:47,277 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T03:23:47,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T03:23:47,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:47,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:47,279 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332227279"}]},"ts":"1732332227279"} 2024-11-23T03:23:47,280 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T03:23:47,281 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T03:23:47,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T03:23:47,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, UNASSIGN}] 2024-11-23T03:23:47,283 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, UNASSIGN 2024-11-23T03:23:47,284 INFO [PEWorker-1 
{}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=CLOSING, regionLocation=0d51875c74df,34141,1732332039937 2024-11-23T03:23:47,284 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T03:23:47,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; CloseRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937}] 2024-11-23T03:23:47,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:47,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0d51875c74df,34141,1732332039937 2024-11-23T03:23:47,436 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close badf71fee211af0b660972c46cad8684 2024-11-23T03:23:47,436 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T03:23:47,436 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing badf71fee211af0b660972c46cad8684, disabling compactions & flushes 2024-11-23T03:23:47,436 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:47,436 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:47,436 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. after waiting 0 ms 2024-11-23T03:23:47,436 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 
2024-11-23T03:23:47,436 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(2837): Flushing badf71fee211af0b660972c46cad8684 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=A 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=B 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK badf71fee211af0b660972c46cad8684, store=C 2024-11-23T03:23:47,437 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T03:23:47,441 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236ce0cf11a67543dfae84169a940a7b67_badf71fee211af0b660972c46cad8684 is 50, key is test_row_0/A:col10/1732332226735/Put/seqid=0 2024-11-23T03:23:47,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742567_1743 (size=12454) 2024-11-23T03:23:47,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:47,844 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T03:23:47,847 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236ce0cf11a67543dfae84169a940a7b67_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ce0cf11a67543dfae84169a940a7b67_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:47,848 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/549596435d9a48a79a5ddfc825bfa432, store: [table=TestAcidGuarantees family=A region=badf71fee211af0b660972c46cad8684] 2024-11-23T03:23:47,848 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/549596435d9a48a79a5ddfc825bfa432 is 175, key is test_row_0/A:col10/1732332226735/Put/seqid=0 2024-11-23T03:23:47,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742568_1744 (size=31255) 2024-11-23T03:23:47,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:48,252 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=502, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/549596435d9a48a79a5ddfc825bfa432 2024-11-23T03:23:48,256 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6a9353d270a3475ea9b6d5921a761656 is 50, key is test_row_0/B:col10/1732332226735/Put/seqid=0 2024-11-23T03:23:48,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742569_1745 (size=12301) 2024-11-23T03:23:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:48,660 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6a9353d270a3475ea9b6d5921a761656 2024-11-23T03:23:48,665 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/6fde3372a94f4feea9d625c8d3f175ad is 50, key is test_row_0/C:col10/1732332226735/Put/seqid=0 2024-11-23T03:23:48,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742570_1746 (size=12301) 2024-11-23T03:23:49,068 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=502 (bloomFilter=true), 
to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/6fde3372a94f4feea9d625c8d3f175ad 2024-11-23T03:23:49,071 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/A/549596435d9a48a79a5ddfc825bfa432 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/549596435d9a48a79a5ddfc825bfa432 2024-11-23T03:23:49,073 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/549596435d9a48a79a5ddfc825bfa432, entries=150, sequenceid=502, filesize=30.5 K 2024-11-23T03:23:49,074 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/B/6a9353d270a3475ea9b6d5921a761656 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6a9353d270a3475ea9b6d5921a761656 2024-11-23T03:23:49,076 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6a9353d270a3475ea9b6d5921a761656, entries=150, sequenceid=502, filesize=12.0 K 2024-11-23T03:23:49,077 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/.tmp/C/6fde3372a94f4feea9d625c8d3f175ad as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/6fde3372a94f4feea9d625c8d3f175ad 2024-11-23T03:23:49,079 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/6fde3372a94f4feea9d625c8d3f175ad, entries=150, sequenceid=502, filesize=12.0 K 2024-11-23T03:23:49,080 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for badf71fee211af0b660972c46cad8684 in 1643ms, sequenceid=502, compaction requested=true 2024-11-23T03:23:49,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9] to archive 2024-11-23T03:23:49,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T03:23:49,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/dda6f305b7294d48bd16cdc402d2f7ce 2024-11-23T03:23:49,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/3ff7dc89505c4bcda8fe75ec3ce2f152 2024-11-23T03:23:49,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77d6d179794d4f9baa47735b58f813bf 2024-11-23T03:23:49,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8a212921431d471086a150659ca40d66 2024-11-23T03:23:49,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/c1d170afe23d43cfbeadc2b505722727 2024-11-23T03:23:49,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/0e06602ac0754de08869957f02d6b877 2024-11-23T03:23:49,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/45986f38c56445aa9ab5f5acf973bace 2024-11-23T03:23:49,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/810e4746a93e4f6cb160720f031cb566 2024-11-23T03:23:49,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/6dbff4e442ab4f5f97a3771d3cd79b37 2024-11-23T03:23:49,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82fa4a1d67714cb4b37854a49520a907 2024-11-23T03:23:49,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/7ad3237426384b4d8d87742ffb9d32bd 2024-11-23T03:23:49,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b50aaa6e660847c6904afbf6cb1a74db 2024-11-23T03:23:49,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee2427732d1542c7b9ca584b07c5c38f 2024-11-23T03:23:49,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/4b734440f88547999449951199ee5abc 2024-11-23T03:23:49,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/552c737684b3471e8fb10fbb3ac84743 2024-11-23T03:23:49,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8f36de8c3fda481ab38206bc5eed64d7 2024-11-23T03:23:49,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/ee97327f2768478d9f4e1707f96b4446 2024-11-23T03:23:49,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/82148f723e324a3fb7b8e56592ea6ebd 2024-11-23T03:23:49,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/49bdad951e8a497d95b57d42755e5c93 2024-11-23T03:23:49,097 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/19a9c370e22c45ccb81659deb7d2bccd 2024-11-23T03:23:49,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2a77e033a1f6429897f8a54bd920daa7 2024-11-23T03:23:49,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/09af06b903ca4e1a85576b4e6e22e285 2024-11-23T03:23:49,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8acf3c61f78d427b9f3dbb6148a243f8 2024-11-23T03:23:49,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/d9ecdaafaecd41ba93faa5ce8a48138c 2024-11-23T03:23:49,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1bd980e072554df48d2ee09f4ac20016 2024-11-23T03:23:49,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/90ad14aab6d2475f962ccd3ca9d532b0 2024-11-23T03:23:49,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/991cd09c91904decac89bea7a684c01b 2024-11-23T03:23:49,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/1b76d51d719745179516b504031162dc 2024-11-23T03:23:49,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/8ef237dc4b4c430ca955b8891c040464 2024-11-23T03:23:49,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/77b236a44f5d4042b1607418bcb92907 2024-11-23T03:23:49,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/62076761393343f399b208df9b7647e2 2024-11-23T03:23:49,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/e7cbf96082bb490eb5090d98d0e6e42f 2024-11-23T03:23:49,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b22a498b8de74e0cb8f9c7da78c82bb9 2024-11-23T03:23:49,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a1f214c8d594a40882b7d34a360b3e9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/643dfa6ccf6e4b1a847db73527b541c6, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f7ecba21e769401c804c33fc021c76a4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/478f005c6f324be597b8e37c9805d9f6, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/85de7464d0d1499088a9c0154f8cbc20, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/46f3ea076d0c4d838fd9616c1a0f472b, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9c70e49fb34244c1918cb1b8adc85142, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/7847422446854b79b86f5e261c40ca75, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8160ba318d1c4235bfce7d3b17fc28b9, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e] to archive 2024-11-23T03:23:49,108 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:23:49,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/5b8013494fe6414ca933c4e11e68817d 2024-11-23T03:23:49,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e5f7d8d8247d4f82b5dd7fb6280f4e2a 2024-11-23T03:23:49,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a1f214c8d594a40882b7d34a360b3e9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a1f214c8d594a40882b7d34a360b3e9 2024-11-23T03:23:49,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/fc84dca1acb94fb787f8d795c7b84d5e 2024-11-23T03:23:49,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/0d8b6889e65e48a397163068a29d9424 2024-11-23T03:23:49,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9a5eadd947bc47e0b677b94a64ec1f00 2024-11-23T03:23:49,113 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/643dfa6ccf6e4b1a847db73527b541c6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/643dfa6ccf6e4b1a847db73527b541c6 2024-11-23T03:23:49,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8062376aacbf4d7f907dc3ce2eb4429c 2024-11-23T03:23:49,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8c9706d9092c4ef190a5ed0f2a2da8d3 2024-11-23T03:23:49,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/080a271e5b314afb968c9c684b6dde6e 2024-11-23T03:23:49,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f7ecba21e769401c804c33fc021c76a4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f7ecba21e769401c804c33fc021c76a4 2024-11-23T03:23:49,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/4fd90677428945e7bc29eb1f992a8a3f 2024-11-23T03:23:49,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8dd548c7fbd542459c3e467e6e5be6f1 2024-11-23T03:23:49,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/478f005c6f324be597b8e37c9805d9f6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/478f005c6f324be597b8e37c9805d9f6 2024-11-23T03:23:49,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/f32cf223bbef4208bc419206f10efee5 2024-11-23T03:23:49,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/47de8098ed8f4b8391c8ee5cb4650e8f 2024-11-23T03:23:49,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/85de7464d0d1499088a9c0154f8cbc20 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/85de7464d0d1499088a9c0154f8cbc20 2024-11-23T03:23:49,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/a5da4efae44f426aa61b33c77d06f4c8 2024-11-23T03:23:49,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/d126baf85a03421b98b170abced325fd 2024-11-23T03:23:49,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/46f3ea076d0c4d838fd9616c1a0f472b to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/46f3ea076d0c4d838fd9616c1a0f472b 2024-11-23T03:23:49,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6826924e77c1406e93bbaf9406894fab 2024-11-23T03:23:49,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/c753a74d4b4a41e691806a14fe0607be 2024-11-23T03:23:49,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9c70e49fb34244c1918cb1b8adc85142 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9c70e49fb34244c1918cb1b8adc85142 2024-11-23T03:23:49,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/81b39b7ba63e41b2ba6e7059ca892c83 2024-11-23T03:23:49,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/9bd0a1d619ed465bb3bdd9ceb4cc7e2c 2024-11-23T03:23:49,125 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/7847422446854b79b86f5e261c40ca75 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/7847422446854b79b86f5e261c40ca75 2024-11-23T03:23:49,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/94f681a1af89458286fa083dd8df0660 2024-11-23T03:23:49,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/ed7d9d88e7c34f51961204586cd8c745 2024-11-23T03:23:49,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8160ba318d1c4235bfce7d3b17fc28b9 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/8160ba318d1c4235bfce7d3b17fc28b9 2024-11-23T03:23:49,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/40002de8b09442f384b3b25fe43b2acf 2024-11-23T03:23:49,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e4c360d513584abf8b311ed9955a994a 2024-11-23T03:23:49,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/821f25ab4b224c4da8165d1e36600a42 2024-11-23T03:23:49,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/27cb25536e6f4c9b82047f5543f8ec6e 2024-11-23T03:23:49,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/74be9a7a00b14b1c9080f6f7a41d74b4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d9aae13c0cbf4284a2a031faa35ccc10, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2deb66fae83f4d34a4d6b53bc0f288d1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4183b362d5a54c21935a1606467ac1b3, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ee9ce258cea94fcfb64306a67d676c47, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/44564673066d414bb660f1d4ffc5765a, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/31c28926d19947c098364b17837cefd4, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d18b6c344c4945aaa2f602e0b91127e1, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d7ec503df18c4bfdb9801c3d4af8e688, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764] to archive 2024-11-23T03:23:49,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T03:23:49,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2be0c590876447d78b7f0c7f00f74ca7 2024-11-23T03:23:49,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ba1b1b78e7924cc3bff2b5b3f68a9942 2024-11-23T03:23:49,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/74be9a7a00b14b1c9080f6f7a41d74b4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/74be9a7a00b14b1c9080f6f7a41d74b4 2024-11-23T03:23:49,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1f26faf2bc5743049517c198bb2eb76d 2024-11-23T03:23:49,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7adc88bfb22e46e8b3bf8aed3c300e78 2024-11-23T03:23:49,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/fb2cdd62030d4148bbd66c808283ed72 2024-11-23T03:23:49,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d9aae13c0cbf4284a2a031faa35ccc10 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d9aae13c0cbf4284a2a031faa35ccc10 2024-11-23T03:23:49,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5d6e68e462c642a0a56eca93299e3373 2024-11-23T03:23:49,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/3bebdd67e9794226b582c1b6acf92661 2024-11-23T03:23:49,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/1b871c78c01a4bceaf8d1d2dfa418caa 2024-11-23T03:23:49,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2deb66fae83f4d34a4d6b53bc0f288d1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/2deb66fae83f4d34a4d6b53bc0f288d1 2024-11-23T03:23:49,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/a08291b3593b4e529a21364dbb7418bf 2024-11-23T03:23:49,142 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/784c2400359045a28718769a0e09f7fc 2024-11-23T03:23:49,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4183b362d5a54c21935a1606467ac1b3 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4183b362d5a54c21935a1606467ac1b3 2024-11-23T03:23:49,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/655df039d757483a91fed56be81e4264 2024-11-23T03:23:49,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/65fe5fc6fe3a45059fbccb8f1a7b06fd 2024-11-23T03:23:49,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ee9ce258cea94fcfb64306a67d676c47 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/ee9ce258cea94fcfb64306a67d676c47 2024-11-23T03:23:49,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/5066316a98044c8293cbc2b6f14db5e0 2024-11-23T03:23:49,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/4ffc31911e424abf84de8b2f17654a01 2024-11-23T03:23:49,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/44564673066d414bb660f1d4ffc5765a to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/44564673066d414bb660f1d4ffc5765a 2024-11-23T03:23:49,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e1056ff07f5145e8805fdc50a0b17057 2024-11-23T03:23:49,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7a40f77322b045118ecdc8f745bb90ac 2024-11-23T03:23:49,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/31c28926d19947c098364b17837cefd4 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/31c28926d19947c098364b17837cefd4 2024-11-23T03:23:49,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/46daac584a054d3eaae74f9e103f05bb 2024-11-23T03:23:49,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/479cbd251ab2485a93451d52fe7c4546 2024-11-23T03:23:49,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d18b6c344c4945aaa2f602e0b91127e1 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d18b6c344c4945aaa2f602e0b91127e1 2024-11-23T03:23:49,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/7dc8ac2e47e94fe6b134d82bffeb9bab 2024-11-23T03:23:49,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/e09a32b77209405da9ba216afef0ae2e 2024-11-23T03:23:49,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d7ec503df18c4bfdb9801c3d4af8e688 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/d7ec503df18c4bfdb9801c3d4af8e688 2024-11-23T03:23:49,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/642bed18753f4ca29ee692f6973da992 2024-11-23T03:23:49,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/c47cc9faf71d419f92852fefc345ac9d 2024-11-23T03:23:49,156 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/603850ab928d43f086af77be66f3bdff 2024-11-23T03:23:49,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/beee334610f44eaba317051388dd3764 2024-11-23T03:23:49,160 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits/505.seqid, newMaxSeqId=505, maxSeqId=4 2024-11-23T03:23:49,160 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684. 2024-11-23T03:23:49,160 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for badf71fee211af0b660972c46cad8684: 2024-11-23T03:23:49,162 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=badf71fee211af0b660972c46cad8684, regionState=CLOSED 2024-11-23T03:23:49,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-23T03:23:49,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseRegionProcedure badf71fee211af0b660972c46cad8684, server=0d51875c74df,34141,1732332039937 in 1.8780 sec 2024-11-23T03:23:49,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-11-23T03:23:49,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=badf71fee211af0b660972c46cad8684, UNASSIGN in 1.8810 sec 2024-11-23T03:23:49,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-23T03:23:49,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8830 sec 2024-11-23T03:23:49,167 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732332229167"}]},"ts":"1732332229167"} 
2024-11-23T03:23:49,168 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T03:23:49,169 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T03:23:49,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8930 sec 2024-11-23T03:23:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T03:23:49,382 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-23T03:23:49,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T03:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,384 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T03:23:49,384 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,386 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,388 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C, FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits] 2024-11-23T03:23:49,390 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2bd276cf4a3f47228c735b0b5f0fb490 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/2bd276cf4a3f47228c735b0b5f0fb490 2024-11-23T03:23:49,391 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/549596435d9a48a79a5ddfc825bfa432 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/549596435d9a48a79a5ddfc825bfa432 2024-11-23T03:23:49,391 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b3b861387b5040f19899b5e839048713 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/A/b3b861387b5040f19899b5e839048713 2024-11-23T03:23:49,393 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6a9353d270a3475ea9b6d5921a761656 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/6a9353d270a3475ea9b6d5921a761656 2024-11-23T03:23:49,394 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/76b3cd9783004900b39c1f7e381747f6 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/76b3cd9783004900b39c1f7e381747f6 2024-11-23T03:23:49,395 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e40ac20544ef4ddab55c1144658df896 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/B/e40ac20544ef4ddab55c1144658df896 2024-11-23T03:23:49,396 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/6fde3372a94f4feea9d625c8d3f175ad to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/6fde3372a94f4feea9d625c8d3f175ad 2024-11-23T03:23:49,397 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/9bca2b8dc9f94849a18b74b225199d74 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/9bca2b8dc9f94849a18b74b225199d74 2024-11-23T03:23:49,398 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/eabd099f59734f65aef4c9a19fb5dc11 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/C/eabd099f59734f65aef4c9a19fb5dc11 2024-11-23T03:23:49,400 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits/505.seqid to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684/recovered.edits/505.seqid 2024-11-23T03:23:49,400 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/default/TestAcidGuarantees/badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,400 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T03:23:49,401 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:23:49,401 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T03:23:49,403 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112305741a3844dc4565b7a2e42c4e350f91_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112305741a3844dc4565b7a2e42c4e350f91_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,404 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230657fc0e515b4d31b0706f5491e1b9d7_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230657fc0e515b4d31b0706f5491e1b9d7_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,405 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309671c1505cf40fc81e38db2dc4a48cf_badf71fee211af0b660972c46cad8684 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112309671c1505cf40fc81e38db2dc4a48cf_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,406 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230de11b4929a043759f5bd211ab45eb81_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230de11b4929a043759f5bd211ab45eb81_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,406 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112310105074e36a424daa1163ca6d0b1edb_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112310105074e36a424daa1163ca6d0b1edb_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,407 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231bbb621139c64c62a831d38723f7522b_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231bbb621139c64c62a831d38723f7522b_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,408 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123203bf435e69c48648ea964f404cd4342_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123203bf435e69c48648ea964f404cd4342_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,409 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233a0cdb9a438f499395932d2ec5c26cca_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233a0cdb9a438f499395932d2ec5c26cca_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,410 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235392907560aa4055b3d1b4547ce68af1_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235392907560aa4055b3d1b4547ce68af1_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,410 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357c67fa91e2b40adae1375dc074b8317_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357c67fa91e2b40adae1375dc074b8317_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,411 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c29968ca0804b51929f87faf8a5b995_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c29968ca0804b51929f87faf8a5b995_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,412 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ce0cf11a67543dfae84169a940a7b67_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ce0cf11a67543dfae84169a940a7b67_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,413 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123745ed262a0ed46bda57ae597e9bab75f_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123745ed262a0ed46bda57ae597e9bab75f_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,413 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112378963d4bbeb44ae89aa3ad5fd093795c_badf71fee211af0b660972c46cad8684 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112378963d4bbeb44ae89aa3ad5fd093795c_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,414 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d98af99484b4c9aaa0cb486561e6899_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237d98af99484b4c9aaa0cb486561e6899_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,415 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112388fbecb400154a4c9a3aeda6c7ffc635_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112388fbecb400154a4c9a3aeda6c7ffc635_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,416 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238c8c63a1249c43d3830b92399927970d_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238c8c63a1249c43d3830b92399927970d_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,417 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238f000e2ce9e24def9fe83be08f457e17_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238f000e2ce9e24def9fe83be08f457e17_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,417 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112397589289845d497c9d9ffc09c6da3374_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112397589289845d497c9d9ffc09c6da3374_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,418 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a2a8d0593d87415a99518d4e4c7e07af_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a2a8d0593d87415a99518d4e4c7e07af_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,419 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c538f3bd48bf43f99bf71995c3e50cb7_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c538f3bd48bf43f99bf71995c3e50cb7_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,420 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c548ea18f9e54873966a69321df80fb0_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c548ea18f9e54873966a69321df80fb0_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,420 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cf1ac1466b6f4b9ca98c0f8abf8ff6f3_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cf1ac1466b6f4b9ca98c0f8abf8ff6f3_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,421 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123def1a6b55a204f6da3bf7322ca65e789_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123def1a6b55a204f6da3bf7322ca65e789_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,422 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dfbd2af42ef543b18716c7500e72d8e5_badf71fee211af0b660972c46cad8684 to 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dfbd2af42ef543b18716c7500e72d8e5_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,423 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e729150cda9f401e9f8b3b90814780dd_badf71fee211af0b660972c46cad8684 to hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e729150cda9f401e9f8b3b90814780dd_badf71fee211af0b660972c46cad8684 2024-11-23T03:23:49,423 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T03:23:49,424 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,426 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T03:23:49,428 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T03:23:49,428 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,429 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T03:23:49,429 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732332229429"}]},"ts":"9223372036854775807"} 2024-11-23T03:23:49,430 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T03:23:49,430 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => badf71fee211af0b660972c46cad8684, NAME => 'TestAcidGuarantees,,1732332202296.badf71fee211af0b660972c46cad8684.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T03:23:49,430 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T03:23:49,430 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732332229430"}]},"ts":"9223372036854775807"} 2024-11-23T03:23:49,431 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T03:23:49,433 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T03:23:49,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 50 msec 2024-11-23T03:23:49,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39215 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T03:23:49,485 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-23T03:23:49,494 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238 (was 240), OpenFileDescriptor=451 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=591 (was 618), ProcessCount=11 (was 11), AvailableMemoryMB=3736 (was 3792) 2024-11-23T03:23:49,494 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-23T03:23:49,494 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T03:23:49,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:61411 2024-11-23T03:23:49,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:49,495 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T03:23:49,495 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=769703085, stopped=false 2024-11-23T03:23:49,495 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=0d51875c74df,39215,1732332039198 2024-11-23T03:23:49,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T03:23:49,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T03:23:49,497 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-23T03:23:49,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:23:49,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:23:49,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:49,497 INFO [Time-limited test {}] 
regionserver.HRegionServer(2561): ***** STOPPING region server '0d51875c74df,34141,1732332039937' ***** 2024-11-23T03:23:49,497 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-23T03:23:49,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T03:23:49,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T03:23:49,498 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T03:23:49,498 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-23T03:23:49,498 INFO [RS:0;0d51875c74df:34141 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T03:23:49,498 INFO [RS:0;0d51875c74df:34141 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T03:23:49,498 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(3579): Received CLOSE for 083dc89e8b2b1c4aa6851e27c52fd159 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1224): stopping server 0d51875c74df,34141,1732332039937 2024-11-23T03:23:49,499 DEBUG [RS:0;0d51875c74df:34141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 083dc89e8b2b1c4aa6851e27c52fd159, disabling compactions & flushes 2024-11-23T03:23:49,499 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. after waiting 0 ms 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 
2024-11-23T03:23:49,499 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 083dc89e8b2b1c4aa6851e27c52fd159 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-23T03:23:49,499 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-23T03:23:49,499 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 083dc89e8b2b1c4aa6851e27c52fd159=hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159.} 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T03:23:49,499 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T03:23:49,499 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T03:23:49,499 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-23T03:23:49,502 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1629): Waiting on 083dc89e8b2b1c4aa6851e27c52fd159, 1588230740 2024-11-23T03:23:49,515 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/.tmp/info/8b2e9ddaeb8f42479580a87c1902c898 is 45, key is default/info:d/1732332043235/Put/seqid=0 2024-11-23T03:23:49,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742571_1747 (size=5037) 2024-11-23T03:23:49,522 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/info/5bf085aba1ae4cc794a6872902edd9c1 is 143, key is hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159./info:regioninfo/1732332043113/Put/seqid=0 2024-11-23T03:23:49,523 INFO [regionserver/0d51875c74df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T03:23:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742572_1748 (size=7725) 2024-11-23T03:23:49,680 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T03:23:49,703 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1629): Waiting on 083dc89e8b2b1c4aa6851e27c52fd159, 
1588230740 2024-11-23T03:23:49,903 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1629): Waiting on 083dc89e8b2b1c4aa6851e27c52fd159, 1588230740 2024-11-23T03:23:49,918 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/.tmp/info/8b2e9ddaeb8f42479580a87c1902c898 2024-11-23T03:23:49,921 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/.tmp/info/8b2e9ddaeb8f42479580a87c1902c898 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/info/8b2e9ddaeb8f42479580a87c1902c898 2024-11-23T03:23:49,923 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/info/8b2e9ddaeb8f42479580a87c1902c898, entries=2, sequenceid=6, filesize=4.9 K 2024-11-23T03:23:49,924 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 083dc89e8b2b1c4aa6851e27c52fd159 in 425ms, sequenceid=6, compaction requested=false 2024-11-23T03:23:49,926 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/info/5bf085aba1ae4cc794a6872902edd9c1 2024-11-23T03:23:49,927 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/namespace/083dc89e8b2b1c4aa6851e27c52fd159/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T03:23:49,927 INFO [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 2024-11-23T03:23:49,927 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 083dc89e8b2b1c4aa6851e27c52fd159: 2024-11-23T03:23:49,927 DEBUG [RS_CLOSE_REGION-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732332041885.083dc89e8b2b1c4aa6851e27c52fd159. 
2024-11-23T03:23:49,943 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/rep_barrier/59c2479882b74e32b9e1a0b640324fd6 is 102, key is TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4./rep_barrier:/1732332076371/DeleteFamily/seqid=0 2024-11-23T03:23:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742573_1749 (size=6025) 2024-11-23T03:23:49,988 INFO [regionserver/0d51875c74df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T03:23:49,988 INFO [regionserver/0d51875c74df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T03:23:50,103 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T03:23:50,303 DEBUG [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T03:23:50,346 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/rep_barrier/59c2479882b74e32b9e1a0b640324fd6 2024-11-23T03:23:50,372 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/table/5ec7a3ed1c9b4b77b7cfeebc4694838c is 96, key is TestAcidGuarantees,,1732332043469.4dfbb59dd7b53f05c99615119ca9b6f4./table:/1732332076371/DeleteFamily/seqid=0 2024-11-23T03:23:50,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742574_1750 (size=5942) 2024-11-23T03:23:50,376 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/table/5ec7a3ed1c9b4b77b7cfeebc4694838c 2024-11-23T03:23:50,381 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/info/5bf085aba1ae4cc794a6872902edd9c1 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/info/5bf085aba1ae4cc794a6872902edd9c1 2024-11-23T03:23:50,384 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/info/5bf085aba1ae4cc794a6872902edd9c1, entries=22, sequenceid=93, filesize=7.5 K 2024-11-23T03:23:50,385 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/rep_barrier/59c2479882b74e32b9e1a0b640324fd6 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/rep_barrier/59c2479882b74e32b9e1a0b640324fd6 2024-11-23T03:23:50,387 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/rep_barrier/59c2479882b74e32b9e1a0b640324fd6, entries=6, sequenceid=93, filesize=5.9 K 2024-11-23T03:23:50,388 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/.tmp/table/5ec7a3ed1c9b4b77b7cfeebc4694838c as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/table/5ec7a3ed1c9b4b77b7cfeebc4694838c 2024-11-23T03:23:50,390 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/table/5ec7a3ed1c9b4b77b7cfeebc4694838c, entries=9, sequenceid=93, filesize=5.8 K 2024-11-23T03:23:50,391 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 892ms, sequenceid=93, compaction requested=false 2024-11-23T03:23:50,395 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-23T03:23:50,395 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T03:23:50,395 INFO [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T03:23:50,395 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T03:23:50,395 DEBUG [RS_CLOSE_META-regionserver/0d51875c74df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T03:23:50,504 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1250): stopping server 0d51875c74df,34141,1732332039937; all regions closed. 
2024-11-23T03:23:50,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741834_1010 (size=26050) 2024-11-23T03:23:50,510 DEBUG [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/oldWALs 2024-11-23T03:23:50,510 INFO [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0d51875c74df%2C34141%2C1732332039937.meta:.meta(num 1732332041646) 2024-11-23T03:23:50,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741832_1008 (size=17207159) 2024-11-23T03:23:50,515 DEBUG [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/oldWALs 2024-11-23T03:23:50,515 INFO [RS:0;0d51875c74df:34141 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0d51875c74df%2C34141%2C1732332039937:(num 1732332041104) 2024-11-23T03:23:50,515 DEBUG [RS:0;0d51875c74df:34141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:50,515 INFO [RS:0;0d51875c74df:34141 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T03:23:50,515 INFO [RS:0;0d51875c74df:34141 {}] hbase.ChoreService(370): Chore service for: regionserver/0d51875c74df:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-23T03:23:50,516 INFO [regionserver/0d51875c74df:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-23T03:23:50,516 INFO [RS:0;0d51875c74df:34141 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34141 2024-11-23T03:23:50,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d51875c74df,34141,1732332039937 2024-11-23T03:23:50,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T03:23:50,521 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f58b48f1d48@412736c0 rejected from java.util.concurrent.ThreadPoolExecutor@425a080[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-23T03:23:50,522 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d51875c74df,34141,1732332039937] 2024-11-23T03:23:50,522 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0d51875c74df,34141,1732332039937; numProcessing=1 2024-11-23T03:23:50,523 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0d51875c74df,34141,1732332039937 already deleted, retry=false 2024-11-23T03:23:50,523 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0d51875c74df,34141,1732332039937 expired; onlineServers=0 2024-11-23T03:23:50,523 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0d51875c74df,39215,1732332039198' ***** 2024-11-23T03:23:50,523 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T03:23:50,524 DEBUG [M:0;0d51875c74df:39215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51bce639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d51875c74df/172.17.0.2:0 2024-11-23T03:23:50,524 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegionServer(1224): stopping server 0d51875c74df,39215,1732332039198 2024-11-23T03:23:50,524 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegionServer(1250): stopping server 0d51875c74df,39215,1732332039198; all regions closed. 2024-11-23T03:23:50,524 DEBUG [M:0;0d51875c74df:39215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T03:23:50,524 DEBUG [M:0;0d51875c74df:39215 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T03:23:50,524 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T03:23:50,524 DEBUG [M:0;0d51875c74df:39215 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T03:23:50,524 INFO [M:0;0d51875c74df:39215 {}] hbase.ChoreService(370): Chore service for: master/0d51875c74df:0 had [] on shutdown 2024-11-23T03:23:50,524 DEBUG [M:0;0d51875c74df:39215 {}] master.HMaster(1733): Stopping service threads 2024-11-23T03:23:50,525 INFO [M:0;0d51875c74df:39215 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T03:23:50,525 DEBUG [master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.small.0-1732332040808 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.small.0-1732332040808,5,FailOnTimeoutGroup] 2024-11-23T03:23:50,525 DEBUG [master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.large.0-1732332040808 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d51875c74df:0:becomeActiveMaster-HFileCleaner.large.0-1732332040808,5,FailOnTimeoutGroup] 2024-11-23T03:23:50,525 ERROR [M:0;0d51875c74df:39215 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:34981 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34981,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-23T03:23:50,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T03:23:50,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T03:23:50,526 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T03:23:50,526 INFO [M:0;0d51875c74df:39215 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T03:23:50,527 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-23T03:23:50,527 DEBUG [M:0;0d51875c74df:39215 {}] zookeeper.ZKUtil(347): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-23T03:23:50,527 WARN [M:0;0d51875c74df:39215 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-23T03:23:50,527 INFO [M:0;0d51875c74df:39215 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-23T03:23:50,527 INFO [M:0;0d51875c74df:39215 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-23T03:23:50,527 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-23T03:23:50,527 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T03:23:50,527 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T03:23:50,527 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-23T03:23:50,527 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T03:23:50,527 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=801.23 KB heapSize=987.91 KB
2024-11-23T03:23:50,544 DEBUG [M:0;0d51875c74df:39215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/220af92dcc2e4bc3bbd7e46fa147f7ca is 82, key is hbase:meta,,1/info:regioninfo/1732332041781/Put/seqid=0
2024-11-23T03:23:50,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742575_1751 (size=5672)
2024-11-23T03:23:50,548 INFO [M:0;0d51875c74df:39215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2312 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/220af92dcc2e4bc3bbd7e46fa147f7ca
2024-11-23T03:23:50,570 DEBUG [M:0;0d51875c74df:39215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/81fca159f8094b9da30e901b7f6b11e0 is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x9A/proc:d/1732332204327/Put/seqid=0
2024-11-23T03:23:50,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742576_1752 (size=45020)
2024-11-23T03:23:50,590 INFO [M:0;0d51875c74df:39215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=800.67 KB at sequenceid=2312 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/81fca159f8094b9da30e901b7f6b11e0
2024-11-23T03:23:50,595 INFO [M:0;0d51875c74df:39215 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 81fca159f8094b9da30e901b7f6b11e0
2024-11-23T03:23:50,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T03:23:50,623 INFO [RS:0;0d51875c74df:34141 {}] regionserver.HRegionServer(1307): Exiting; stopping=0d51875c74df,34141,1732332039937; zookeeper connection closed.
2024-11-23T03:23:50,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34141-0x1002264e1b00001, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T03:23:50,623 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3037af45 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3037af45
2024-11-23T03:23:50,624 DEBUG [M:0;0d51875c74df:39215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f0de01574d04487923108b7536f69bb is 69, key is 0d51875c74df,34141,1732332039937/rs:state/1732332040866/Put/seqid=0
2024-11-23T03:23:50,624 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-23T03:23:50,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073742577_1753 (size=5156)
2024-11-23T03:23:51,028 INFO [M:0;0d51875c74df:39215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2312 (bloomFilter=true), to=hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f0de01574d04487923108b7536f69bb
2024-11-23T03:23:51,032 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/220af92dcc2e4bc3bbd7e46fa147f7ca as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/220af92dcc2e4bc3bbd7e46fa147f7ca
2024-11-23T03:23:51,035 INFO [M:0;0d51875c74df:39215 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/220af92dcc2e4bc3bbd7e46fa147f7ca, entries=8, sequenceid=2312, filesize=5.5 K
2024-11-23T03:23:51,036 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/81fca159f8094b9da30e901b7f6b11e0 as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/81fca159f8094b9da30e901b7f6b11e0
2024-11-23T03:23:51,039 INFO [M:0;0d51875c74df:39215 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 81fca159f8094b9da30e901b7f6b11e0
2024-11-23T03:23:51,039 INFO [M:0;0d51875c74df:39215 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/81fca159f8094b9da30e901b7f6b11e0, entries=179, sequenceid=2312, filesize=44.0 K
2024-11-23T03:23:51,040 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f0de01574d04487923108b7536f69bb as hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f0de01574d04487923108b7536f69bb
2024-11-23T03:23:51,043 INFO [M:0;0d51875c74df:39215 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34981/user/jenkins/test-data/a6194646-36b6-153a-378f-cc4ec11ea417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f0de01574d04487923108b7536f69bb, entries=1, sequenceid=2312, filesize=5.0 K
2024-11-23T03:23:51,043 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegion(3040): Finished flush of dataSize ~801.23 KB/820458, heapSize ~987.62 KB/1011320, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 516ms, sequenceid=2312, compaction requested=false
2024-11-23T03:23:51,049 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T03:23:51,050 DEBUG [M:0;0d51875c74df:39215 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-23T03:23:51,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741830_1006 (size=971977)
2024-11-23T03:23:51,052 INFO [M:0;0d51875c74df:39215 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-23T03:23:51,052 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-23T03:23:51,052 INFO [M:0;0d51875c74df:39215 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39215
2024-11-23T03:23:51,054 DEBUG [M:0;0d51875c74df:39215 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/0d51875c74df,39215,1732332039198 already deleted, retry=false
2024-11-23T03:23:51,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T03:23:51,156 INFO [M:0;0d51875c74df:39215 {}] regionserver.HRegionServer(1307): Exiting; stopping=0d51875c74df,39215,1732332039198; zookeeper connection closed.
2024-11-23T03:23:51,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39215-0x1002264e1b00000, quorum=127.0.0.1:61411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T03:23:51,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T03:23:51,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T03:23:51,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T03:23:51,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T03:23:51,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.log.dir/,STOPPED}
2024-11-23T03:23:51,169 WARN [BP-1790657337-172.17.0.2-1732332036355 heartbeating to localhost/127.0.0.1:34981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T03:23:51,169 WARN [BP-1790657337-172.17.0.2-1732332036355 heartbeating to localhost/127.0.0.1:34981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1790657337-172.17.0.2-1732332036355 (Datanode Uuid 09c68cfc-715e-4d36-8e8f-79b3dfd79368) service to localhost/127.0.0.1:34981
2024-11-23T03:23:51,171 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T03:23:51,171 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T03:23:51,172 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/dfs/data/data1/current/BP-1790657337-172.17.0.2-1732332036355 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T03:23:51,172 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/cluster_7b750f4e-a63f-fd4b-2318-2c89961f322e/dfs/data/data2/current/BP-1790657337-172.17.0.2-1732332036355 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T03:23:51,173 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T03:23:51,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-23T03:23:51,182 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T03:23:51,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T03:23:51,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T03:23:51,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2837f349-51a1-bdc0-5100-3a14ba583655/hadoop.log.dir/,STOPPED}
2024-11-23T03:23:51,205 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-23T03:23:51,404 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down